From 3c37022e994fc22172b7f260bc49f3cddca7bb60 Mon Sep 17 00:00:00 2001
From: Danny Canter <36526702+dcantah@users.noreply.github.com>
Date: Fri, 3 Jun 2022 16:30:47 -0700
Subject: [PATCH] Remove vendor dir in /test (#1417)

Given that the vendor dir in /test slows down development both for us and for
external contributors, since for most changes one needs to run `go mod vendor`
in /test to bring in the latest local hcsshim changes, I think it's time we
removed it.

Pros:
1. Easier for automated tooling, like dependabot and more recently a Microsoft
   security bot, to make PRs that can just be checked in. These automated PRs
   tend to fail because the bot doesn't know it also needs to run `go mod
   vendor` in /test for our repo. Today, checking them in typically means
   someone manually checks out the branch dependabot (or whatever other bot)
   made, runs `go mod vendor` in /test, and pushes a new commit to the
   automated PR before we can merge it.
2. Speeds up the development flow, as we no longer need to run `go mod vendor`
   in /test before pushing almost every change.
3. Speeds up external contributions as well, since there's no extra step to
   follow anymore when changing most things in /internal. We state that this
   step is needed in our README, but it's probably a testament to how odd our
   setup is that it still gets missed here and there.

Cons:
1. We lose the main selling point of vendoring our test dependencies: if one of
   them ever becomes inaccessible (deleted, renamed, whatever else), we no
   longer have a local copy in our repo. This increases our dependence on the
   Go module proxy server, which seems like a fair tradeoff, and I think we're
   fine with it for test dependencies at least.

I've removed the references to this extra step in the README and got rid of the
CI step that verified the vendor dir was up to date. I don't think we needed
the `GOFLAGS: -mod=vendor` env setting either: since Go 1.14, if there's a
top-level vendor folder, I believe the flag is transparently set for commands
that accept it.
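For context, the setup being removed looks roughly like this. The replace
directive is quoted from the README section deleted below; the command sequence
is only an illustrative sketch of the old workflow, not an exact script from
this repo.

```sh
# test/go.mod pulls in the in-repo hcsshim code via a relative replace directive:
#
#     replace (
#         github.com/Microsoft/hcsshim => ../
#     )
#
# Before this change, most edits to the main module also meant refreshing the
# vendored copy under /test before pushing:
cd test
go mod vendor   # refresh test/vendor with the latest local hcsshim code
go mod tidy     # this whole step (and test/vendor itself) goes away with this PR
```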
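On the `-mod` flag: a short note on why the repo-wide GOFLAGS setting was
dispensable and what the test jobs do instead. The two commands are taken from
the updated CI workflow in this patch; the comments describe documented Go
module behavior rather than anything specific to hcsshim.

```sh
# Since Go 1.14, the go command defaults to -mod=vendor whenever the main module
# has a top-level vendor/ directory (and its go.mod declares go 1.14 or newer),
# so the GOFLAGS: -mod=vendor environment variable removed from ci.yml was
# effectively implied already. With test/vendor gone, the test jobs instead pass
# -mod=mod explicitly, for example:
go test -mod=mod -gcflags=all=-d=checkptr -v ./internal -tags admin
go build -mod=mod -o sample-logging-driver.exe ./cri-containerd/helpers/log.go
```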
Signed-off-by: Daniel Canter --- .github/workflows/ci.yml | 37 +- README.md | 18 - .../github.com/Microsoft/go-winio/README.md | 37 - .../Microsoft/go-winio/backuptar/tar.go | 517 --- .../github.com/Microsoft/go-winio/file.go | 329 -- .../github.com/Microsoft/go-winio/hvsock.go | 316 -- .../Microsoft/go-winio/pkg/guid/guid.go | 228 -- .../go-winio/pkg/guid/guid_nonwindows.go | 15 - .../go-winio/pkg/guid/guid_windows.go | 10 - .../Microsoft/go-winio/privilege.go | 203 -- .../github.com/Microsoft/go-winio/vhd/vhd.go | 350 --- .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 - .../Microsoft/hcsshim/.gitattributes | 3 - .../github.com/Microsoft/hcsshim/.gitignore | 45 - .../Microsoft/hcsshim/.golangci.yml | 110 - .../github.com/Microsoft/hcsshim/Makefile | 121 - .../Microsoft/hcsshim/Protobuild.toml | 48 - .../github.com/Microsoft/hcsshim/README.md | 120 - .../stats/stats.pb.go | 2780 ----------------- .../hcsshim/computestorage/attach.go | 40 - .../hcsshim/computestorage/destroy.go | 28 - .../hcsshim/computestorage/detach.go | 28 - .../hcsshim/computestorage/export.go | 48 - .../hcsshim/computestorage/format.go | 77 - .../hcsshim/computestorage/helpers.go | 197 -- .../hcsshim/computestorage/import.go | 43 - .../hcsshim/computestorage/initialize.go | 40 - .../Microsoft/hcsshim/computestorage/mount.go | 28 - .../Microsoft/hcsshim/computestorage/setup.go | 76 - .../github.com/Microsoft/hcsshim/container.go | 225 -- .../github.com/Microsoft/hcsshim/errors.go | 250 -- .../hcsshim/ext4/dmverity/dmverity.go | 248 -- .../ext4/internal/compactext4/compact.go | 1348 -------- .../github.com/Microsoft/hcsshim/hcn/doc.go | 3 - .../github.com/Microsoft/hcsshim/hcn/hcn.go | 328 -- .../Microsoft/hcsshim/hcn/hcnendpoint.go | 390 --- .../Microsoft/hcsshim/hcn/hcnerrors.go | 164 - .../Microsoft/hcsshim/hcn/hcnglobals.go | 140 - .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 313 -- .../Microsoft/hcsshim/hcn/hcnnamespace.go | 448 --- .../Microsoft/hcsshim/hcn/hcnnetwork.go | 464 --- .../Microsoft/hcsshim/hcn/hcnpolicy.go | 346 -- .../Microsoft/hcsshim/hcn/hcnroute.go | 268 -- .../Microsoft/hcsshim/hcn/hcnsupport.go | 150 - .../github.com/Microsoft/hcsshim/hcsshim.go | 30 - .../Microsoft/hcsshim/hnsendpoint.go | 120 - .../Microsoft/hcsshim/hnsglobals.go | 18 - .../Microsoft/hcsshim/hnsnetwork.go | 38 - .../Microsoft/hcsshim/hnspolicylist.go | 49 - .../Microsoft/hcsshim/hnssupport.go | 15 - .../github.com/Microsoft/hcsshim/interface.go | 116 - .../Microsoft/hcsshim/internal/clone/doc.go | 1 - .../hcsshim/internal/clone/registry.go | 170 - .../Microsoft/hcsshim/internal/cmd/cmd.go | 321 -- .../Microsoft/hcsshim/internal/cmd/diag.go | 68 - .../Microsoft/hcsshim/internal/cmd/doc.go | 3 - .../Microsoft/hcsshim/internal/cmd/io.go | 82 - .../hcsshim/internal/cmd/io_binary.go | 290 -- .../hcsshim/internal/cmd/io_npipe.go | 296 -- .../Microsoft/hcsshim/internal/cni/doc.go | 1 - .../hcsshim/internal/cni/registry.go | 112 - .../internal/computeagent/computeagent.pb.go | 2673 ---------------- .../internal/computeagent/computeagent.proto | 63 - .../hcsshim/internal/computeagent/doc.go | 10 - .../hcsshim/internal/copyfile/copyfile.go | 54 - .../hcsshim/internal/copyfile/doc.go | 1 - .../Microsoft/hcsshim/internal/cow/cow.go | 93 - .../hcsshim/internal/cpugroup/cpugroup.go | 78 - .../hcsshim/internal/cpugroup/doc.go | 1 - .../internal/credentials/credentials.go | 130 - .../hcsshim/internal/credentials/doc.go | 4 - .../internal/devices/assigned_devices.go | 107 - .../Microsoft/hcsshim/internal/devices/doc.go | 1 - 
.../hcsshim/internal/devices/drivers.go | 96 - .../Microsoft/hcsshim/internal/devices/pnp.go | 125 - .../hcsshim/internal/extendedtask/doc.go | 1 - .../internal/extendedtask/extendedtask.pb.go | 550 ---- .../internal/extendedtask/extendedtask.proto | 17 - .../Microsoft/hcsshim/internal/gcs/bridge.go | 458 --- .../hcsshim/internal/gcs/container.go | 246 -- .../Microsoft/hcsshim/internal/gcs/doc.go | 1 - .../hcsshim/internal/gcs/guestconnection.go | 310 -- .../Microsoft/hcsshim/internal/gcs/process.go | 293 -- .../hcsshim/internal/gcs/protocol.go | 371 --- .../hcsshim/internal/guest/spec/doc.go | 3 - .../hcsshim/internal/guest/spec/spec.go | 90 - .../hcsshim/internal/guestpath/paths.go | 30 - .../hcsshim/internal/hcs/callback.go | 163 - .../Microsoft/hcsshim/internal/hcs/doc.go | 1 - .../Microsoft/hcsshim/internal/hcs/errors.go | 348 --- .../Microsoft/hcsshim/internal/hcs/process.go | 558 ---- .../hcsshim/internal/hcs/schema1/schema1.go | 252 -- .../hcs/schema2/modify_setting_request.go | 22 - .../Microsoft/hcsshim/internal/hcs/service.go | 51 - .../Microsoft/hcsshim/internal/hcs/system.go | 814 ----- .../Microsoft/hcsshim/internal/hcs/utils.go | 64 - .../hcsshim/internal/hcs/waithelper.go | 70 - .../hcsshim/internal/hcserror/doc.go | 1 - .../hcsshim/internal/hcserror/hcserror.go | 49 - .../hcsshim/internal/hcsoci/clone.go | 47 - .../hcsshim/internal/hcsoci/create.go | 476 --- .../hcsshim/internal/hcsoci/devices.go | 278 -- .../Microsoft/hcsshim/internal/hcsoci/doc.go | 1 - .../hcsshim/internal/hcsoci/hcsdoc_lcow.go | 99 - .../hcsshim/internal/hcsoci/hcsdoc_wcow.go | 485 --- .../hcsshim/internal/hcsoci/network.go | 51 - .../hcsshim/internal/hcsoci/resources_lcow.go | 178 -- .../hcsshim/internal/hcsoci/resources_wcow.go | 235 -- .../Microsoft/hcsshim/internal/hns/doc.go | 1 - .../hcsshim/internal/hns/hnsendpoint.go | 338 -- .../hcsshim/internal/hns/hnsfuncs.go | 51 - .../hcsshim/internal/hns/hnsglobals.go | 30 - .../hcsshim/internal/hns/hnsnetwork.go | 144 - .../hcsshim/internal/hns/hnspolicy.go | 110 - .../hcsshim/internal/hns/hnspolicylist.go | 203 -- .../hcsshim/internal/hns/hnssupport.go | 51 - .../hcsshim/internal/hns/namespace.go | 113 - .../Microsoft/hcsshim/internal/hooks/spec.go | 53 - .../Microsoft/hcsshim/internal/interop/doc.go | 1 - .../hcsshim/internal/interop/interop.go | 25 - .../hcsshim/internal/jobobject/doc.go | 8 - .../hcsshim/internal/jobobject/iocp.go | 113 - .../hcsshim/internal/jobobject/limits.go | 317 -- .../Microsoft/hcsshim/internal/layers/doc.go | 2 - .../Microsoft/hcsshim/internal/lcow/common.go | 66 - .../Microsoft/hcsshim/internal/lcow/disk.go | 55 - .../Microsoft/hcsshim/internal/lcow/doc.go | 1 - .../hcsshim/internal/lcow/scratch.go | 154 - .../Microsoft/hcsshim/internal/log/context.go | 116 - .../Microsoft/hcsshim/internal/log/hook.go | 45 - .../Microsoft/hcsshim/internal/log/scrub.go | 194 -- .../hcsshim/internal/logfields/fields.go | 61 - .../Microsoft/hcsshim/internal/memory/pool.go | 316 -- .../hcsshim/internal/memory/types.go | 28 - .../internal/ncproxy/networking/endpoints.go | 33 - .../internal/ncproxy/networking/networks.go | 17 - .../ncproxyttrpc/networkconfigproxy.pb.go | 1311 -------- .../Microsoft/hcsshim/internal/oc/span.go | 48 - .../hcsshim/internal/oci/annotations.go | 150 - .../Microsoft/hcsshim/internal/oci/sandbox.go | 54 - .../Microsoft/hcsshim/internal/oci/uvm.go | 347 -- .../hcsshim/internal/processorinfo/doc.go | 1 - .../processorinfo/host_information.go | 34 - .../internal/processorinfo/processor_count.go | 19 - 
.../internal/protocol/guestrequest/types.go | 56 - .../protocol/guestresource/resources.go | 160 - .../Microsoft/hcsshim/internal/queue/mq.go | 111 - .../hcsshim/internal/regstate/doc.go | 1 - .../hcsshim/internal/regstate/regstate.go | 290 -- .../hcsshim/internal/resources/doc.go | 3 - .../hcsshim/internal/resources/resources.go | 167 - .../hcsshim/internal/runhcs/container.go | 73 - .../Microsoft/hcsshim/internal/runhcs/vm.go | 45 - .../Microsoft/hcsshim/internal/safefile/do.go | 1 - .../hcsshim/internal/safefile/safeopen.go | 404 --- .../hcsshim/internal/schemaversion/doc.go | 1 - .../internal/schemaversion/schemaversion.go | 102 - .../internal/security/grantvmgroupaccess.go | 192 -- .../internal/security/syscall_windows.go | 7 - .../internal/security/zsyscall_windows.go | 70 - .../hcsshim/internal/shimdiag/shimdiag.go | 89 - .../hcsshim/internal/shimdiag/shimdiag.pb.go | 1913 ------------ .../hcsshim/internal/uvm/capabilities.go | 26 - .../Microsoft/hcsshim/internal/uvm/clone.go | 141 - .../hcsshim/internal/uvm/combine_layers.go | 91 - .../hcsshim/internal/uvm/computeagent.go | 259 -- .../hcsshim/internal/uvm/constants.go | 30 - .../Microsoft/hcsshim/internal/uvm/counter.go | 19 - .../hcsshim/internal/uvm/cpugroups.go | 55 - .../hcsshim/internal/uvm/cpulimits_update.go | 20 - .../Microsoft/hcsshim/internal/uvm/create.go | 408 --- .../hcsshim/internal/uvm/create_lcow.go | 819 ----- .../hcsshim/internal/uvm/create_wcow.go | 385 --- .../hcsshim/internal/uvm/delete_container.go | 16 - .../Microsoft/hcsshim/internal/uvm/doc.go | 2 - .../hcsshim/internal/uvm/dumpstacks.go | 15 - .../hcsshim/internal/uvm/guest_request.go | 17 - .../hcsshim/internal/uvm/hvsocket.go | 46 - .../hcsshim/internal/uvm/memory_update.go | 47 - .../Microsoft/hcsshim/internal/uvm/modify.go | 50 - .../Microsoft/hcsshim/internal/uvm/network.go | 712 ----- .../Microsoft/hcsshim/internal/uvm/pipes.go | 72 - .../Microsoft/hcsshim/internal/uvm/plan9.go | 140 - .../Microsoft/hcsshim/internal/uvm/scsi.go | 732 ----- .../hcsshim/internal/uvm/security_policy.go | 56 - .../Microsoft/hcsshim/internal/uvm/share.go | 74 - .../Microsoft/hcsshim/internal/uvm/start.go | 317 -- .../Microsoft/hcsshim/internal/uvm/stats.go | 158 - .../hcsshim/internal/uvm/timezone.go | 60 - .../Microsoft/hcsshim/internal/uvm/types.go | 149 - .../hcsshim/internal/uvm/update_uvm.go | 66 - .../hcsshim/internal/uvm/virtual_device.go | 175 -- .../Microsoft/hcsshim/internal/uvm/vpmem.go | 264 -- .../hcsshim/internal/uvm/vpmem_mapped.go | 321 -- .../Microsoft/hcsshim/internal/uvm/vsmb.go | 435 --- .../Microsoft/hcsshim/internal/uvm/wait.go | 20 - .../hcsshim/internal/uvmfolder/doc.go | 1 - .../hcsshim/internal/vmcompute/doc.go | 1 - .../hcsshim/internal/vmcompute/vmcompute.go | 618 ---- .../hcsshim/internal/wclayer/activatelayer.go | 29 - .../hcsshim/internal/wclayer/baselayer.go | 183 -- .../hcsshim/internal/wclayer/createlayer.go | 29 - .../internal/wclayer/createscratchlayer.go | 36 - .../internal/wclayer/deactivatelayer.go | 26 - .../hcsshim/internal/wclayer/destroylayer.go | 27 - .../Microsoft/hcsshim/internal/wclayer/doc.go | 4 - .../internal/wclayer/expandscratchsize.go | 142 - .../hcsshim/internal/wclayer/exportlayer.go | 101 - .../internal/wclayer/getlayermountpath.go | 52 - .../internal/wclayer/getsharedbaseimages.go | 31 - .../hcsshim/internal/wclayer/grantvmaccess.go | 28 - .../hcsshim/internal/wclayer/importlayer.go | 168 - .../hcsshim/internal/wclayer/layerexists.go | 30 - .../hcsshim/internal/wclayer/layerid.go | 24 - 
.../hcsshim/internal/wclayer/layerutils.go | 99 - .../hcsshim/internal/wclayer/legacy.go | 812 ----- .../hcsshim/internal/wclayer/nametoguid.go | 31 - .../hcsshim/internal/wclayer/preparelayer.go | 46 - .../hcsshim/internal/wclayer/processimage.go | 43 - .../internal/wclayer/unpreparelayer.go | 27 - .../hcsshim/internal/wclayer/wclayer.go | 34 - .../Microsoft/hcsshim/internal/wcow/doc.go | 1 - .../hcsshim/internal/wcow/scratch.go | 27 - .../hcsshim/internal/winapi/bindflt.go | 20 - .../hcsshim/internal/winapi/console.go | 46 - .../hcsshim/internal/winapi/devices.go | 15 - .../Microsoft/hcsshim/internal/winapi/doc.go | 3 - .../hcsshim/internal/winapi/elevation.go | 11 - .../hcsshim/internal/winapi/errors.go | 17 - .../hcsshim/internal/winapi/filesystem.go | 112 - .../hcsshim/internal/winapi/jobobject.go | 224 -- .../Microsoft/hcsshim/internal/winapi/path.go | 12 - .../hcsshim/internal/winapi/process.go | 65 - .../hcsshim/internal/winapi/system.go | 55 - .../hcsshim/internal/winapi/thread.go | 13 - .../Microsoft/hcsshim/internal/winapi/user.go | 194 -- .../hcsshim/internal/winapi/utils.go | 82 - .../hcsshim/internal/winapi/winapi.go | 3 - .../internal/winapi/zsyscall_windows.go | 407 --- .../github.com/Microsoft/hcsshim/layer.go | 109 - .../hcsshim/osversion/osversion_windows.go | 52 - .../Microsoft/hcsshim/pkg/go-runhcs/doc.go | 1 - .../Microsoft/hcsshim/pkg/go-runhcs/runhcs.go | 175 -- .../pkg/go-runhcs/runhcs_create-scratch.go | 56 - .../hcsshim/pkg/go-runhcs/runhcs_create.go | 103 - .../hcsshim/pkg/go-runhcs/runhcs_delete.go | 35 - .../hcsshim/pkg/go-runhcs/runhcs_exec.go | 90 - .../hcsshim/pkg/go-runhcs/runhcs_kill.go | 13 - .../hcsshim/pkg/go-runhcs/runhcs_list.go | 30 - .../hcsshim/pkg/go-runhcs/runhcs_pause.go | 12 - .../hcsshim/pkg/go-runhcs/runhcs_ps.go | 22 - .../pkg/go-runhcs/runhcs_resize-tty.go | 35 - .../hcsshim/pkg/go-runhcs/runhcs_resume.go | 12 - .../hcsshim/pkg/go-runhcs/runhcs_start.go | 12 - .../hcsshim/pkg/go-runhcs/runhcs_state.go | 22 - .../Microsoft/hcsshim/pkg/ociwclayer/doc.go | 3 - .../hcsshim/pkg/ociwclayer/export.go | 86 - .../hcsshim/pkg/ociwclayer/import.go | 150 - .../hcsshim/pkg/octtrpc/interceptor.go | 117 - .../github.com/Microsoft/hcsshim/process.go | 100 - .../containerd/containerd/.golangci.yml | 27 - .../containerd/containerd/Vagrantfile | 260 -- .../containerd/containerd/oci/spec_opts.go | 1292 -------- .../containerd/containerd/version/version.go | 34 - test/vendor/github.com/docker/cli/AUTHORS | 723 ----- test/vendor/github.com/docker/cli/LICENSE | 191 -- test/vendor/github.com/docker/cli/NOTICE | 19 - .../docker/cli/cli/config/config.go | 140 - .../docker/cli/cli/config/configfile/file.go | 387 --- .../cli/cli/config/credentials/credentials.go | 17 - .../cli/config/credentials/default_store.go | 21 - .../credentials/default_store_darwin.go | 5 - .../config/credentials/default_store_linux.go | 13 - .../credentials/default_store_unsupported.go | 7 - .../credentials/default_store_windows.go | 5 - .../cli/cli/config/credentials/file_store.go | 81 - .../cli/config/credentials/native_store.go | 143 - .../docker/cli/cli/config/types/authconfig.go | 22 - .../github.com/docker/distribution/LICENSE | 202 -- .../registry/client/auth/challenge/addr.go | 27 - .../client/auth/challenge/authchallenge.go | 237 -- .../docker/docker-credential-helpers/LICENSE | 20 - .../client/client.go | 121 - .../client/command.go | 56 - .../credentials/credentials.go | 186 -- .../credentials/error.go | 102 - .../credentials/helper.go | 14 - .../credentials/version.go | 4 - 
test/vendor/github.com/docker/docker/AUTHORS | 2082 ------------ test/vendor/github.com/docker/docker/LICENSE | 191 -- test/vendor/github.com/docker/docker/NOTICE | 19 - .../docker/pkg/homedir/homedir_linux.go | 93 - .../docker/pkg/homedir/homedir_others.go | 27 - .../docker/docker/pkg/homedir/homedir_unix.go | 35 - .../docker/pkg/homedir/homedir_windows.go | 24 - .../google/go-containerregistry/LICENSE | 202 -- .../internal/and/and_closer.go | 48 - .../go-containerregistry/internal/gzip/zip.go | 117 - .../internal/redact/redact.go | 35 - .../internal/retry/retry.go | 77 - .../wait/kubernetes_apimachinery_wait.go | 123 - .../internal/verify/verify.go | 64 - .../go-containerregistry/pkg/authn/README.md | 242 -- .../go-containerregistry/pkg/authn/anon.go | 26 - .../go-containerregistry/pkg/authn/auth.go | 30 - .../go-containerregistry/pkg/authn/authn.go | 36 - .../go-containerregistry/pkg/authn/basic.go | 29 - .../go-containerregistry/pkg/authn/bearer.go | 27 - .../go-containerregistry/pkg/authn/doc.go | 17 - .../pkg/authn/keychain.go | 89 - .../pkg/authn/multikeychain.go | 41 - .../go-containerregistry/pkg/logs/logs.go | 39 - .../go-containerregistry/pkg/name/README.md | 3 - .../go-containerregistry/pkg/name/check.go | 43 - .../go-containerregistry/pkg/name/digest.go | 96 - .../go-containerregistry/pkg/name/doc.go | 42 - .../go-containerregistry/pkg/name/errors.go | 37 - .../go-containerregistry/pkg/name/options.go | 83 - .../go-containerregistry/pkg/name/ref.go | 76 - .../go-containerregistry/pkg/name/registry.go | 136 - .../pkg/name/repository.go | 121 - .../go-containerregistry/pkg/name/tag.go | 108 - .../go-containerregistry/pkg/v1/config.go | 133 - .../google/go-containerregistry/pkg/v1/doc.go | 18 - .../go-containerregistry/pkg/v1/hash.go | 123 - .../go-containerregistry/pkg/v1/image.go | 59 - .../go-containerregistry/pkg/v1/index.go | 43 - .../go-containerregistry/pkg/v1/layer.go | 42 - .../go-containerregistry/pkg/v1/manifest.go | 67 - .../pkg/v1/match/match.go | 90 - .../pkg/v1/partial/README.md | 82 - .../pkg/v1/partial/compressed.go | 163 - .../pkg/v1/partial/doc.go | 17 - .../pkg/v1/partial/image.go | 28 - .../pkg/v1/partial/index.go | 85 - .../pkg/v1/partial/uncompressed.go | 223 -- .../pkg/v1/partial/with.go | 389 --- .../go-containerregistry/pkg/v1/platform.go | 59 - .../go-containerregistry/pkg/v1/progress.go | 25 - .../pkg/v1/remote/README.md | 117 - .../pkg/v1/remote/catalog.go | 151 - .../pkg/v1/remote/check.go | 59 - .../pkg/v1/remote/delete.go | 57 - .../pkg/v1/remote/descriptor.go | 424 --- .../go-containerregistry/pkg/v1/remote/doc.go | 17 - .../pkg/v1/remote/image.go | 235 -- .../pkg/v1/remote/index.go | 261 -- .../pkg/v1/remote/layer.go | 93 - .../pkg/v1/remote/list.go | 146 - .../pkg/v1/remote/mount.go | 95 - .../pkg/v1/remote/multi_write.go | 298 -- .../pkg/v1/remote/options.go | 195 -- .../pkg/v1/remote/transport/README.md | 129 - .../pkg/v1/remote/transport/basic.go | 62 - .../pkg/v1/remote/transport/bearer.go | 311 -- .../pkg/v1/remote/transport/doc.go | 18 - .../pkg/v1/remote/transport/error.go | 197 -- .../pkg/v1/remote/transport/logger.go | 91 - .../pkg/v1/remote/transport/ping.go | 129 - .../pkg/v1/remote/transport/retry.go | 88 - .../pkg/v1/remote/transport/schemer.go | 44 - .../pkg/v1/remote/transport/scope.go | 24 - .../pkg/v1/remote/transport/transport.go | 103 - .../pkg/v1/remote/transport/useragent.go | 94 - .../pkg/v1/remote/write.go | 901 ------ .../pkg/v1/stream/README.md | 68 - .../pkg/v1/stream/layer.go | 242 -- .../pkg/v1/types/types.go | 71 - 
.../pkg/v1/zz_deepcopy_generated.go | 318 -- test/vendor/github.com/kevpar/cri/LICENSE | 201 -- .../kevpar/cri/pkg/annotations/annotations.go | 42 - .../kevpar/cri/pkg/api/v1/api.pb.go | 1344 -------- .../kevpar/cri/pkg/api/v1/api.proto | 54 - test/vendor/modules.txt | 415 --- 374 files changed, 7 insertions(+), 60065 deletions(-) delete mode 100644 test/vendor/github.com/Microsoft/go-winio/README.md delete mode 100644 test/vendor/github.com/Microsoft/go-winio/backuptar/tar.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/file.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/hvsock.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/privilege.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/vhd/vhd.go delete mode 100644 test/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/.gitattributes delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/.gitignore delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/.golangci.yml delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/Makefile delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/Protobuild.toml delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/README.md delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/export.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/format.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/import.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/container.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/errors.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go delete mode 100644 
test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hcsshim.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hnsglobals.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/hnssupport.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/interface.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/clone/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/clone/registry.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/cmd.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/diag.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_binary.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_npipe.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.pb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/cpugroup.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/credentials/credentials.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/credentials/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/devices/assigned_devices.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/devices/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/devices/drivers.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/devices/pnp.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.pb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/bridge.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/container.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/guestconnection.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/process.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/gcs/protocol.go delete mode 
100644 test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/spec.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/guestpath/paths.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/clone.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/hooks/spec.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/layers/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/lcow/common.go 
delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/lcow/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/log/context.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/endpoints.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/networks.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.pb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/oci/sandbox.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/host_information.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/processor_count.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestresource/resources.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/resources/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/resources/resources.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.pb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/capabilities.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go delete mode 100644 
test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/computeagent.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpugroups.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpulimits_update.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/delete_container.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/dumpstacks.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/guest_request.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/hvsocket.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/memory_update.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/pipes.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/share.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/stats.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/timezone.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/update_uvm.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/virtual_device.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go 
delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wcow/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/layer.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go 
delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/octtrpc/interceptor.go delete mode 100644 test/vendor/github.com/Microsoft/hcsshim/process.go delete mode 100644 test/vendor/github.com/containerd/containerd/.golangci.yml delete mode 100644 test/vendor/github.com/containerd/containerd/Vagrantfile delete mode 100644 test/vendor/github.com/containerd/containerd/oci/spec_opts.go delete mode 100644 test/vendor/github.com/containerd/containerd/version/version.go delete mode 100644 test/vendor/github.com/docker/cli/AUTHORS delete mode 100644 test/vendor/github.com/docker/cli/LICENSE delete mode 100644 test/vendor/github.com/docker/cli/NOTICE delete mode 100644 test/vendor/github.com/docker/cli/cli/config/config.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/configfile/file.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/credentials.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/default_store.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/file_store.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/credentials/native_store.go delete mode 100644 test/vendor/github.com/docker/cli/cli/config/types/authconfig.go delete mode 100644 test/vendor/github.com/docker/distribution/LICENSE delete mode 100644 test/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go delete mode 100644 test/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/LICENSE delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/client/client.go delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/client/command.go delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go delete mode 100644 
test/vendor/github.com/docker/docker-credential-helpers/credentials/error.go delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go delete mode 100644 test/vendor/github.com/docker/docker-credential-helpers/credentials/version.go delete mode 100644 test/vendor/github.com/docker/docker/AUTHORS delete mode 100644 test/vendor/github.com/docker/docker/LICENSE delete mode 100644 test/vendor/github.com/docker/docker/NOTICE delete mode 100644 test/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go delete mode 100644 test/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go delete mode 100644 test/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go delete mode 100644 test/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/LICENSE delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/redact/redact.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/retry/retry.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/internal/verify/verify.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/check.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/digest.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/errors.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/options.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/registry.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/name/tag.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/config.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go delete mode 100644 
test/vendor/github.com/google/go-containerregistry/pkg/v1/image.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/index.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go delete mode 100644 
test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go delete mode 100644 test/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go delete mode 100644 test/vendor/github.com/kevpar/cri/LICENSE delete mode 100644 test/vendor/github.com/kevpar/cri/pkg/annotations/annotations.go delete mode 100644 test/vendor/github.com/kevpar/cri/pkg/api/v1/api.pb.go delete mode 100644 test/vendor/github.com/kevpar/cri/pkg/api/v1/api.proto delete mode 100644 test/vendor/modules.txt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 10732582c0..7eb3f131dc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,10 +3,6 @@ on: - push - pull_request -env: - GOFLAGS: -mod=vendor - GOPROXY: off - jobs: protos: runs-on: 'windows-2019' @@ -88,25 +84,6 @@ jobs: } exit $process.ExitCode - verify-test-vendor: - runs-on: 'windows-2019' - env: - GOPROXY: "https://proxy.golang.org,direct" - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: '^1.17.0' - - name: Validate test modules - shell: powershell - run: | - $currentPath = (Get-Location).Path - $process = Start-Process powershell.exe -PassThru -Verb runAs -Wait -ArgumentList $currentPath/scripts/Verify-GoModules.ps1, $currentPath, "test" - if ($process.ExitCode -ne 0) { - Write-Error "Test package modules are not up to date. Please validate your go version >= this job's and run `go mod vendor` followed by `go mod tidy` in hcsshim/test directory." - } - exit $process.ExitCode - test: runs-on: ${{ matrix.os }} strategy: @@ -119,17 +96,17 @@ jobs: go-version: '^1.17.0' - run: go test -gcflags=all=-d=checkptr -v ./... 
-tags admin - - run: go test -gcflags=all=-d=checkptr -v ./internal -tags admin - working-directory: test - - run: go test -gcflags=all=-d=checkptr -c ./containerd-shim-runhcs-v1/ -tags functional + - run: go test -mod=mod -gcflags=all=-d=checkptr -v ./internal -tags admin + working-directory: test + - run: go test -mod=mod -gcflags=all=-d=checkptr -c ./containerd-shim-runhcs-v1/ -tags functional working-directory: test - - run: go test -gcflags=all=-d=checkptr -c ./cri-containerd/ -tags functional + - run: go test -mod=mod -gcflags=all=-d=checkptr -c ./cri-containerd/ -tags functional working-directory: test - - run: go test -gcflags=all=-d=checkptr -c ./functional/ -tags functional + - run: go test -mod=mod -gcflags=all=-d=checkptr -c ./functional/ -tags functional working-directory: test - - run: go test -gcflags=all=-d=checkptr -c ./runhcs/ -tags functional + - run: go test -mod=mod -gcflags=all=-d=checkptr -c ./runhcs/ -tags functional working-directory: test - - run: go build -o sample-logging-driver.exe ./cri-containerd/helpers/log.go + - run: go build -mod=mod -o sample-logging-driver.exe ./cri-containerd/helpers/log.go working-directory: test - uses: actions/upload-artifact@v2 diff --git a/README.md b/README.md index 690e2dab97..5a1361539b 100644 --- a/README.md +++ b/README.md @@ -75,24 +75,6 @@ certify they either authored the work themselves or otherwise have permission to more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off. -### Test Directory (Important to note) - -This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this -project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has -its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file -has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project -(which is the repo itself on your disk). - -``` -replace ( - github.com/Microsoft/hcsshim => ../ -) -``` - -Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the -CI in this project will check if the files are out of date and will fail if this is true. - - ## Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). diff --git a/test/vendor/github.com/Microsoft/go-winio/README.md b/test/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 683be1dcf9..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. 
This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. - -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - - - -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/test/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/test/vendor/github.com/Microsoft/go-winio/backuptar/tar.go deleted file mode 100644 index 2342a7fcd6..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ /dev/null @@ -1,517 +0,0 @@ -// +build windows - -package backuptar - -import ( - "archive/tar" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Microsoft/go-winio" - "golang.org/x/sys/windows" -) - -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -const ( - hdrFileAttributes = "MSWINDOWS.fileattr" - hdrSecurityDescriptor = "MSWINDOWS.sd" - hdrRawSecurityDescriptor = "MSWINDOWS.rawsd" - hdrMountPoint = "MSWINDOWS.mountpoint" - hdrEaPrefix = "MSWINDOWS.xattr." - - hdrCreationTime = "LIBARCHIVE.creationtime" -) - -// zeroReader is an io.Reader that always returns 0s. -type zeroReader struct{} - -func (zr zeroReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = 0 - } - return len(b), nil -} - -func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { - curOffset := int64(0) - for { - bhdr, err := br.Next() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if bhdr.Id != winio.BackupSparseBlock { - return fmt.Errorf("unexpected stream %d", bhdr.Id) - } - - // We can't seek backwards, since we have already written that data to the tar.Writer. 
- if bhdr.Offset < curOffset { - return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset) - } - // archive/tar does not support writing sparse files - // so just write zeroes to catch up to the current offset. - if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { - return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) - } - if bhdr.Size == 0 { - // A sparse block with size = 0 is used to mark the end of the sparse blocks. - break - } - n, err := io.Copy(t, br) - if err != nil { - return err - } - if n != bhdr.Size { - return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset) - } - curOffset = bhdr.Offset + n - } - return nil -} - -// BasicInfoHeader creates a tar header from basic file information. -func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { - hdr := &tar.Header{ - Format: tar.FormatPAX, - Name: filepath.ToSlash(name), - Size: size, - Typeflag: tar.TypeReg, - ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), - ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), - AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), - PAXRecords: make(map[string]string), - } - hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) - hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR - hdr.Size = 0 - hdr.Typeflag = tar.TypeDir - } - return hdr -} - -// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file -// from the tar header and returns the security descriptor into a byte slice. -func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { - // Maintaining old SDDL-based behavior for backward - // compatibility. All new tar headers written by this library - // will have raw binary for the security descriptor. - var sd []byte - var err error - if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } - } - return sd, nil -} - -// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the -// current file from the tar header and returns it as a byte slice. -func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { - var eas []winio.ExtendedAttribute - var eadata []byte - var err error - for k, v := range hdr.PAXRecords { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) - } - if len(eas) != 0 { - eadata, err = winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } - } - return eadata, nil -} - -// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header -// and encodes it into a byte slice. The file for which this function is called must be a -// symlink. 
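For orientation while reading the backuptar hunks: `SecurityDescriptorFromTarHeader` and `ExtendedAttributesFromTarHeader` above decode the `MSWINDOWS.*` PAX records back into Win32 metadata. A sketch of that read path, illustrative only, with a hypothetical `layer.tar` as input:

```
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	f, err := os.Open(`layer.tar`) // hypothetical tar written by WriteTarFileFromBackupStream
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// MSWINDOWS.rawsd / MSWINDOWS.sd -> raw security descriptor bytes.
		sd, err := backuptar.SecurityDescriptorFromTarHeader(hdr)
		if err != nil {
			panic(err)
		}
		// MSWINDOWS.xattr.* -> re-encoded Win32 extended attribute buffer.
		eas, err := backuptar.ExtendedAttributesFromTarHeader(hdr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d-byte security descriptor, %d bytes of EAs\n", hdr.Name, len(sd), len(eas))
	}
}
```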
-func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { - _, isMountPoint := hdr.PAXRecords[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - return winio.EncodeReparsePoint(&rp) -} - -// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. -// -// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. -// -// The additional Win32 metadata is: -// -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) -func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { - name = filepath.ToSlash(name) - hdr := BasicInfoHeader(name, size, fileInfo) - - // If r can be seeked, then this function is two-pass: pass 1 collects the - // tar header data, and pass 2 copies the data stream. If r cannot be - // seeked, then some header data (in particular EAs) will be silently lost. - var ( - restartPos int64 - err error - ) - sr, readTwice := r.(io.Seeker) - if readTwice { - if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { - readTwice = false - } - } - - br := winio.NewBackupStreamReader(r) - var dataHdr *winio.BackupHeader - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupData: - hdr.Mode |= c_ISREG - if !readTwice { - dataHdr = bhdr - } - case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) - if err != nil { - return err - } - hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) - - case winio.BackupReparseData: - hdr.Mode |= c_ISLNK - hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) - rp, err := winio.DecodeReparsePoint(reparseBuffer) - if err != nil { - return err - } - if rp.IsMountPoint { - hdr.PAXRecords[hdrMountPoint] = "1" - } - hdr.Linkname = rp.Target - - case winio.BackupEaData: - eab, err := ioutil.ReadAll(br) - if err != nil { - return err - } - eas, err := winio.DecodeExtendedAttributes(eab) - if err != nil { - return err - } - for _, ea := range eas { - // Use base64 encoding for the binary value. Note that there - // is no way to encode the EA's flags, since their use doesn't - // make any sense for persisted EAs. - hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) - } - - case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) - } - } - - err = t.WriteHeader(hdr) - if err != nil { - return err - } - - if readTwice { - // Get back to the data stream. - if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { - return err - } - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if bhdr.Id == winio.BackupData { - dataHdr = bhdr - } - } - } - - // The logic for copying file contents is fairly complicated due to the need for handling sparse files, - // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream - // with size and content, or no data stream at all (if empty). 
However, for a sparse file, the content can also - // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse - // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described - // in the list at the bottom of this block comment. - // - // Sparse files can be represented in four different ways, based on the specifics of the file. - // - Size = 0: - // Previously: BackupRead yields no data stream and no sparse block streams. - // Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams. - // - Size > 0, no allocated ranges: - // BackupRead yields a data stream with size = 0. Following is a single sparse block stream with - // size = 0 and offset = . - // - Size > 0, one allocated range: - // BackupRead yields a data stream with size = containing the file contents. There are no - // sparse block streams. This is the case if you take a normal file with contents and simply set the - // sparse flag on it. - // - Size > 0, multiple allocated ranges: - // BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated - // range of the file containing the range contents. Finally there is a sparse block stream with - // size = 0 and offset = . - - if dataHdr != nil { - // A data stream was found. Copy the data. - // We assume that we will either have a data stream size > 0 XOR have sparse block streams. - if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 { - if size != dataHdr.Size { - return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) - } - if _, err = io.Copy(t, br); err != nil { - return fmt.Errorf("%s: copying contents from data stream: %s", name, err) - } - } else if size > 0 { - // As of a recent OS change, BackupRead now returns a data stream for empty sparse files. - // These files have no sparse block streams, so skip the copySparse call if file size = 0. - if err = copySparse(t, br); err != nil { - return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err) - } - } - } - - // Look for streams after the data stream. The only ones we handle are alternate data streams. - // Other streams may have metadata that could be serialized, but the tar header has already - // been written. In practice, this means that we don't get EA or TXF metadata. - for { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Format: hdr.Format, - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { - // Unsupported for now, since the size of the alternate stream is not present - // in the backup stream until after the data has been read. 
- return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name) - } - case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) - } - } - return nil -} - -// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by -// WriteTarFileFromBackupStream. -func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { - name = hdr.Name - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - size = hdr.Size - } - fileInfo = &winio.FileBasicInfo{ - LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()), - LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()), - ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()), - // Default to ModTime, we'll pull hdrCreationTime below if present - CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()), - } - if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok { - attr, err := strconv.ParseUint(attrStr, 10, 32) - if err != nil { - return "", 0, nil, err - } - fileInfo.FileAttributes = uint32(attr) - } else { - if hdr.Typeflag == tar.TypeDir { - fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY - } - } - if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok { - creationTime, err := parsePAXTime(creationTimeStr) - if err != nil { - return "", 0, nil, err - } - fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano()) - } - return -} - -// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple -// tar file entries in order to collect all the alternate data streams for the file, it returns the next -// tar file that was not processed, or io.EOF is there are no more. -func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { - bw := winio.NewBackupStreamWriter(w) - - sd, err := SecurityDescriptorFromTarHeader(hdr) - if err != nil { - return nil, err - } - if len(sd) != 0 { - bhdr := winio.BackupHeader{ - Id: winio.BackupSecurity, - Size: int64(len(sd)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(sd) - if err != nil { - return nil, err - } - } - - eadata, err := ExtendedAttributesFromTarHeader(hdr) - if err != nil { - return nil, err - } - if len(eadata) != 0 { - bhdr := winio.BackupHeader{ - Id: winio.BackupEaData, - Size: int64(len(eadata)), - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(eadata) - if err != nil { - return nil, err - } - } - - if hdr.Typeflag == tar.TypeSymlink { - reparse := EncodeReparsePointFromTarHeader(hdr) - bhdr := winio.BackupHeader{ - Id: winio.BackupReparseData, - Size: int64(len(reparse)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(reparse) - if err != nil { - return nil, err - } - - } - - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - bhdr := winio.BackupHeader{ - Id: winio.BackupData, - Size: hdr.Size, - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } - // Copy all the alternate data streams and return the next non-ADS header. 
- for { - ahdr, err := t.Next() - if err != nil { - return nil, err - } - if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { - return ahdr, nil - } - bhdr := winio.BackupHeader{ - Id: winio.BackupAlternateData, - Size: ahdr.Size, - Name: ahdr.Name[len(hdr.Name):] + ":$DATA", - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } -} diff --git a/test/vendor/github.com/Microsoft/go-winio/file.go b/test/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index 293ab54c80..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,329 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} - -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIo() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. 
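Referring back to the backuptar functions above (`WriteTarFileFromBackupStream` in particular): a hedged sketch of the write path. It assumes go-winio's `GetFileBasicInfo` and `NewBackupFileReader` helpers, which are not shown in this diff, plus made-up file paths.

```
package main

import (
	"archive/tar"
	"os"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	src, err := os.Open(`C:\some\file.txt`) // made-up source path
	if err != nil {
		panic(err)
	}
	defer src.Close()

	st, err := src.Stat()
	if err != nil {
		panic(err)
	}
	// GetFileBasicInfo and NewBackupFileReader are assumed from go-winio;
	// neither appears in this diff.
	info, err := winio.GetFileBasicInfo(src)
	if err != nil {
		panic(err)
	}

	out, err := os.Create(`out.tar`)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	tw := tar.NewWriter(out)
	defer tw.Close()

	// The backup stream carries the data plus the security descriptor, EAs,
	// and reparse data that end up in the MSWINDOWS.* PAX records.
	br := winio.NewBackupFileReader(src, true)
	defer br.Close()

	if err := backuptar.WriteTarFileFromBackupStream(tw, br, "file.txt", st.Size(), info); err != nil {
		panic(err)
	}
}
```

In practice this is typically run with SeBackupPrivilege enabled (see the privilege.go hunk further down) so ACLs on the source file do not get in the way.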
-type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomicBool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomicBool -} - -// makeWin32File makes a new win32File from an existing file handle -func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIo) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed -func (f *win32File) IsClosed() bool { - return f.closing.isSet() -} - -// prepareIo prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIo() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.isSet() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever -func ioCompletionProcessor(h syscall.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// asyncIo processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { - return int(bytes), err - } - - if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing.isSet() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. 
- var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.setFalse() - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.setTrue() - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. - timeoutIO() - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/go-winio/hvsock.go b/test/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index b2b644d002..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,316 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "fmt" - "io" - "net" - "os" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const ( - afHvSock = 34 // AF_HYPERV - - socketError = ^uintptr(0) -) - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -// Network returns the address's network name, "hvsock". 
-func (addr *HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHvSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = syscall.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. -func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIo() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - conn := &HvsockConn{ - sock: sock, - } - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. 
-func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() - if err != nil { - return nil, err - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := sock.prepareIo() - if err != nil { - return nil, err - } - defer sock.wg.Done() - var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) - if err != nil { - return nil, err - } - conn := &HvsockConn{ - sock: sock, - remote: *addr, - } - sock = nil - return conn, nil -} -*/ - -func (conn *HvsockConn) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return ErrFileClosed - } - - err := syscall.Shutdown(conn.sock.handle, how) - if err != nil { - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. 
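For context on the hvsock hunk: a rough sketch, not part of the change, of listening for an AF_HYPERV connection from a guest. The VM GUID and port below are placeholders; `HvsockAddr`, `VsockServiceID`, and `ListenHvsock` are the functions shown above.

```
package main

import (
	"bufio"
	"fmt"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Placeholder VM ID; in practice this comes from the running compute system.
	vmID, err := guid.FromString("ae0b5ba5-0183-4b1a-9c4f-aa3a1a4c7e24")
	if err != nil {
		panic(err)
	}

	addr := &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(4097), // example AF_VSOCK-style port
	}

	l, err := winio.ListenHvsock(addr)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	conn, err := l.Accept() // an *HvsockConn, used as a net.Conn
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	line, _ := bufio.NewReader(conn).ReadString('\n')
	fmt.Printf("from %s: %s", conn.RemoteAddr(), line)
}
```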
-func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 2d9161e2de..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,228 +0,0 @@ -// +build windows - -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. -func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. 
- g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. -func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. 
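A tiny illustration of the guid package shown in this hunk, generating a version 4 GUID and round-tripping it through its string form; nothing here goes beyond the functions above.

```
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	g, err := guid.NewV4() // random, version 4, RFC 4122 variant
	if err != nil {
		panic(err)
	}
	fmt.Println(g.String(), g.Version(), g.Variant())

	// Round-trip through the xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form
	// that FromString accepts.
	parsed, err := guid.FromString(g.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == g) // true
}
```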
-func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go deleted file mode 100644 index f64d828c0b..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package guid - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type as that is only available to builds -// targeted at `windows`. The representation matches that used by native Windows -// code. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} diff --git a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go deleted file mode 100644 index 83617f4eee..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package guid - -import "golang.org/x/sys/windows" - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/test/vendor/github.com/Microsoft/go-winio/privilege.go b/test/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index c3dd7c2176..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,203 +0,0 @@ -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - SE_PRIVILEGE_ENABLED = 2 - - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - -var ( - privNames = 
make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. -type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "" - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } else { - s = "Could not enable privilege " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. -func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. 
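The privilege helpers above are typically used to scope a privilege such as `SeBackupPrivilege` to a single operation. A minimal sketch of that pattern; the callback body is a placeholder.

```
package main

import (
	"github.com/Microsoft/go-winio"
)

func main() {
	// Enable SeBackupPrivilege only for the duration of the callback; the
	// helper locks the OS thread, impersonates, adjusts the thread token,
	// and reverts when fn returns (see RunWithPrivileges above).
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		// Placeholder: privileged work, e.g. opening a handle with backup
		// semantics, would go here.
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```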
-func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p, _ := windows.GetCurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/test/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/test/vendor/github.com/Microsoft/go-winio/vhd/vhd.go deleted file mode 100644 index f7f78fc230..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ /dev/null @@ -1,350 +0,0 @@ -//go:build windows -// +build windows - -package vhd - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go - -//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped 
*syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk -//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk -//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath - -type ( - CreateVirtualDiskFlag uint32 - VirtualDiskFlag uint32 - AttachVirtualDiskFlag uint32 - DetachVirtualDiskFlag uint32 - VirtualDiskAccessMask uint32 -) - -type VirtualStorageType struct { - DeviceID uint32 - VendorID guid.GUID -} - -type CreateVersion2 struct { - UniqueID guid.GUID - MaximumSize uint64 - BlockSizeInBytes uint32 - SectorSizeInBytes uint32 - PhysicalSectorSizeInByte uint32 - ParentPath *uint16 // string - SourcePath *uint16 // string - OpenFlags uint32 - ParentVirtualStorageType VirtualStorageType - SourceVirtualStorageType VirtualStorageType - ResiliencyGUID guid.GUID -} - -type CreateVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 CreateVersion2 -} - -type OpenVersion2 struct { - GetInfoOnly bool - ReadOnly bool - ResiliencyGUID guid.GUID -} - -type OpenVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 OpenVersion2 -} - -// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, -// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating -// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. -type openVersion2 struct { - getInfoOnly int32 - readOnly int32 - resiliencyGUID guid.GUID -} - -type openVirtualDiskParameters struct { - version uint32 - version2 openVersion2 -} - -type AttachVersion2 struct { - RestrictedOffset uint64 - RestrictedLength uint64 -} - -type AttachVirtualDiskParameters struct { - Version uint32 - Version2 AttachVersion2 -} - -const ( - VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - - // Access Mask for opening a VHD - VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 - VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 - VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 - VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 - VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 - VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 - VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 - VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 - VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 - VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - - // Flags for creating a VHD - CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 - CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 - CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 - CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 - CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 - CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 - CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 - CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 - CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 
- - // Flags for opening a VHD - OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 - OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 - OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 - OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 - OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 - OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 - OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 - OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 - OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 - OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 - OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - - // Flags for attaching a VHD - AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 - AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 - AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 - AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 - AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 - AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 - AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 - AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 - AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 - AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 - AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - - // Flags for detaching a VHD - DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 -) - -// CreateVhdx is a helper function to create a simple vhdx file at the given path using -// default values. -func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { - params := CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, - BlockSizeInBytes: blockSizeInMb * 1024 * 1024, - }, - } - - handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) - if err != nil { - return err - } - - return syscall.CloseHandle(handle) -} - -// DetachVirtualDisk detaches a virtual hard disk by handle. -func DetachVirtualDisk(handle syscall.Handle) (err error) { - if err := detachVirtualDisk(handle, 0, 0); err != nil { - return fmt.Errorf("failed to detach virtual disk: %w", err) - } - return nil -} - -// DetachVhd detaches a vhd found at `path`. -func DetachVhd(path string) error { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - defer syscall.CloseHandle(handle) - return DetachVirtualDisk(handle) -} - -// AttachVirtualDisk attaches a virtual hard disk for use. -func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { - // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. - if err := attachVirtualDisk( - handle, - nil, - uint32(attachVirtualDiskFlag), - 0, - parameters, - nil, - ); err != nil { - return fmt.Errorf("failed to attach virtual disk: %w", err) - } - return nil -} - -// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 -// of the ATTACH_VIRTUAL_DISK_PARAMETERS. 
-func AttachVhd(path string) (err error) { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - - defer syscall.CloseHandle(handle) - params := AttachVirtualDiskParameters{Version: 2} - if err := AttachVirtualDisk( - handle, - AttachVirtualDiskFlagNone, - ¶ms, - ); err != nil { - return fmt.Errorf("failed to attach virtual disk: %w", err) - } - return nil -} - -// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. -func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { - parameters := OpenVirtualDiskParameters{Version: 2} - handle, err := OpenVirtualDiskWithParameters( - vhdPath, - virtualDiskAccessMask, - openVirtualDiskFlags, - ¶meters, - ) - if err != nil { - return 0, err - } - return handle, nil -} - -// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. -func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - getInfoOnly int32 - readOnly int32 - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - if parameters.Version2.GetInfoOnly { - getInfoOnly = 1 - } - if parameters.Version2.ReadOnly { - readOnly = 1 - } - params := &openVirtualDiskParameters{ - version: parameters.Version, - version2: openVersion2{ - getInfoOnly, - readOnly, - parameters.Version2.ResiliencyGUID, - }, - } - if err := openVirtualDisk( - &defaultType, - vhdPath, - uint32(virtualDiskAccessMask), - uint32(openVirtualDiskFlags), - params, - &handle, - ); err != nil { - return 0, fmt.Errorf("failed to open virtual disk: %w", err) - } - return handle, nil -} - -// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. -func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - - if err := createVirtualDisk( - &defaultType, - path, - uint32(virtualDiskAccessMask), - nil, - uint32(createVirtualDiskFlags), - 0, - parameters, - nil, - &handle, - ); err != nil { - return handle, fmt.Errorf("failed to create virtual disk: %w", err) - } - return handle, nil -} - -// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical -// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer -// that represents the particular enumeration of the physical disk on the caller's system. 
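To make the vhd hunk concrete: an illustrative sketch, with made-up paths and sizes, that creates, attaches, and detaches a VHDX using only the helpers shown above (`CreateVhdx`, `AttachVhd`, `DetachVhd`).

```
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	const path = `C:\temp\scratch.vhdx` // made-up location

	// 1 GB maximum size, 1 MB block size; the helper fills in the rest of
	// the version-2 create parameters.
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		panic(err)
	}

	if err := vhd.AttachVhd(path); err != nil {
		panic(err)
	}

	// ... use the attached disk ...

	if err := vhd.DetachVhd(path); err != nil {
		panic(err)
	}
	fmt.Println("detached", path)
}
```

Note that, per the hunk above, `AttachVhd` opens its own handle for the attach call and closes it before returning.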
-func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { - var ( - diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars - diskPhysicalPathBuf [256]uint16 - ) - if err := getVirtualDiskPhysicalPath( - handle, - &diskPathSizeInBytes, - &diskPhysicalPathBuf[0], - ); err != nil { - return "", fmt.Errorf("failed to get disk physical path: %w", err) - } - return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil -} - -// CreateDiffVhd is a helper function to create a differencing virtual disk. -func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { - // Setting `ParentPath` is how to signal to create a differencing disk. - createParams := &CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - ParentPath: windows.StringToUTF16Ptr(baseVhdPath), - BlockSizeInBytes: blockSizeInMB * 1024 * 1024, - OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), - }, - } - - vhdHandle, err := CreateVirtualDisk( - diffVhdPath, - VirtualDiskAccessNone, - CreateVirtualDiskFlagNone, - createParams, - ) - if err != nil { - return fmt.Errorf("failed to create differencing vhd: %w", err) - } - if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %w", err) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/test/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go deleted file mode 100644 index 1d7498db3b..0000000000 --- a/test/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package vhd - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") - - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") - procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") - procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") -) - -func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { - r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { - var _p0 *uint16 - _p0, win32err = syscall.UTF16PtrFromString(path) - if win32err != nil { - return - } - return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) -} - -func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { - r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { - r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { - r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { - var _p0 *uint16 - _p0, win32err = syscall.UTF16PtrFromString(path) - if win32err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *VirtualStorageType, 
path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { - r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/.gitattributes b/test/vendor/github.com/Microsoft/hcsshim/.gitattributes deleted file mode 100644 index dd0d09faac..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -* text=auto eol=lf -vendor/** -text -test/vendor/** -text \ No newline at end of file diff --git a/test/vendor/github.com/Microsoft/hcsshim/.gitignore b/test/vendor/github.com/Microsoft/hcsshim/.gitignore deleted file mode 100644 index e81c4f97e3..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/.gitignore +++ /dev/null @@ -1,45 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Ignore vscode setting files -.vscode/ -.idea/ - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -# Ignore gcs bin directory -service/bin/ -service/pkg/ - -*.img -*.vhd -*.tar.gz -*.tar - -# Make stuff -.rootfs-done -bin/* -rootfs/* -rootfs-conv/* -*.o -/build/ - -deps/* -out/* - -# test results -test/results - -# go workspace files -go.work -go.work.sum diff --git a/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml deleted file mode 100644 index a6ba730245..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ /dev/null @@ -1,110 +0,0 @@ -run: - timeout: 8m - -linters: - enable: - - gofmt - - stylecheck - -linters-settings: - stylecheck: - # https://staticcheck.io/docs/checks - checks: ["all"] - - -issues: - # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like - # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go - # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't - # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, - # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. 
- exclude-rules: - - path: layer.go - linters: - - stylecheck - Text: "ST1003:" - - - path: hcsshim.go - linters: - - stylecheck - Text: "ST1003:" - - - path: cmd\\ncproxy\\nodenetsvc\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: cmd\\ncproxy_mock\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcs\\schema2\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\wclayer\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: hcn\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcs\\schema1\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hns\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: ext4\\internal\\compactext4\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: ext4\\internal\\format\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\guestrequest\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\guest\\prot\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\windevice\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\winapi\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\vmcompute\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\regstate\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcserror\\ - linters: - - stylecheck - Text: "ST1003:" \ No newline at end of file diff --git a/test/vendor/github.com/Microsoft/hcsshim/Makefile b/test/vendor/github.com/Microsoft/hcsshim/Makefile deleted file mode 100644 index ea0d88748d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/Makefile +++ /dev/null @@ -1,121 +0,0 @@ -BASE:=base.tar.gz -DEV_BUILD:=0 - -GO:=go -GO_FLAGS:=-ldflags "-s -w" # strip Go binaries -CGO_ENABLED:=0 -GOMODVENDOR:= - -CFLAGS:=-O2 -Wall -LDFLAGS:=-static -s # strip C binaries - -GO_FLAGS_EXTRA:= -ifeq "$(GOMODVENDOR)" "1" -GO_FLAGS_EXTRA += -mod=vendor -endif -GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) - -SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) -# additional directories to search for rule prerequisites and targets -VPATH=$(SRCROOT) - -DELTA_TARGET=out/delta.tar.gz - -ifeq "$(DEV_BUILD)" "1" -DELTA_TARGET=out/delta-dev.tar.gz -endif - -# The link aliases for gcstools -GCS_TOOLS=\ - generichook \ - install-drivers - -.PHONY: all always rootfs test - -.DEFAULT_GOAL := all - -all: out/initrd.img out/rootfs.tar.gz - -clean: - find -name '*.o' -print0 | xargs -0 -r rm - rm -rf bin deps rootfs out - -test: - cd $(SRCROOT) && $(GO) test -v ./internal/guest/... - -rootfs: out/rootfs.vhd - -out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4 - gzip -f -d ./out/rootfs.tar.gz - bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@ - -out/rootfs.tar.gz: out/initrd.img - rm -rf rootfs-conv - mkdir rootfs-conv - gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) - tar -zcf $@ -C rootfs-conv . - rm -rf rootfs-conv - -out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed - gzip -c out/initrd.img.uncompressed > $@ - rm out/initrd.img.uncompressed - -# This target includes utilities which may be useful for testing purposes. -out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report - rm -rf rootfs-dev - mkdir rootfs-dev - tar -xzf out/delta.tar.gz -C rootfs-dev - cp bin/internal/tools/snp-report rootfs-dev/bin/ - tar -zcf $@ -C rootfs-dev . 
- rm -rf rootfs-dev - -out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile - @mkdir -p out - rm -rf rootfs - mkdir -p rootfs/bin/ - mkdir -p rootfs/info/ - cp bin/init rootfs/ - cp bin/vsockexec rootfs/bin/ - cp bin/cmd/gcs rootfs/bin/ - cp bin/cmd/gcstools rootfs/bin/ - cp bin/cmd/hooks/wait-paths rootfs/bin/ - for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done - git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \ - git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \ - date --iso-8601=minute --utc > rootfs/info/tar.date - $(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \ - jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \ - jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date) - tar -zcf $@ -C rootfs . - rm -rf rootfs - --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake --include deps/cmd/hooks/wait-paths.gomake --include deps/cmd/tar2ext4.gomake --include deps/internal/tools/snp-report.gomake - -# Implicit rule for includes that define Go targets. -%.gomake: $(SRCROOT)/Makefile - @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ - -bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o - @mkdir -p bin - $(CC) $(LDFLAGS) -o $@ $^ - -bin/init: init/init.o vsockexec/vsock.o - @mkdir -p bin - $(CC) $(LDFLAGS) -o $@ $^ - -%.o: %.c - @mkdir -p $(dir $@) - $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/test/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/test/vendor/github.com/Microsoft/hcsshim/Protobuild.toml deleted file mode 100644 index 471f133866..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/Protobuild.toml +++ /dev/null @@ -1,48 +0,0 @@ -version = "1" -generator = "gogoctrd" -plugins = ["grpc", "fieldpath"] - -# Control protoc include paths. Below are usually some good defaults, but feel -# free to try it without them if it works for your project. -[includes] - # Include paths that will be added before all others. Typically, you want to - # treat the root of the project as an include, but this may not be necessary. - before = ["./protobuf"] - - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. - packages = ["github.com/gogo/protobuf"] - -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. 
-[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" - "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1" - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"] -plugins = ["ttrpc"] \ No newline at end of file diff --git a/test/vendor/github.com/Microsoft/hcsshim/README.md b/test/vendor/github.com/Microsoft/hcsshim/README.md deleted file mode 100644 index 690e2dab97..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/README.md +++ /dev/null @@ -1,120 +0,0 @@ -# hcsshim - -[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) - -This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. - -It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well. - -## Building - -While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). -### Linux Hyper-V Container Guest Agent - -To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. -```powershell -C:\> $env:GOOS="linux" -C:\> go build .\cmd\gcs\ -``` - -or on a Linux machine -```sh -> go build ./cmd/gcs -``` - -If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container. 
- -```sh -docker pull busybox -docker run --name base_image_container busybox -docker export base_image_container | gzip > base.tar.gz -BASE=./base.tar.gz -make all -``` - -If the build is successful, in the `./out` folder you should see: -```sh -> ls ./out/ -delta.tar.gz initrd.img rootfs.tar.gz -``` - -### Containerd Shim -For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. - -Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. - -```powershell -C:\> $env:GOOS="windows" -C:\> go build .\cmd\containerd-shim-runhcs-v1 -``` - -Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: -```powershell -.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii -``` - -This config file will already have the shim set as the default runtime for cri interactions. - -To trial using the shim out with ctr.exe: -```powershell -C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" -``` - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to -certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for -more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure -that all commits in a given PR are signed-off. - -### Test Directory (Important to note) - -This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this -project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has -its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file -has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project -(which is the repo itself on your disk). - -``` -replace ( - github.com/Microsoft/hcsshim => ../ -) -``` - -Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the -CI in this project will check if the files are out of date and will fail if this is true. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Dependencies - -This project requires Golang 1.17 or newer to build. - -For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). - -## Reporting Security Issues - -Security issues and bugs should be reported privately, via email, to the Microsoft Security -Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should -receive a response within 24 hours. If for some reason you do not, please follow up via -email to ensure we received your original message. Further information, including the -[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in -the [Security TechCenter](https://technet.microsoft.com/en-us/security/default). - -For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet - ---------------- -Copyright (c) 2018 Microsoft Corp. All rights reserved. diff --git a/test/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go b/test/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go deleted file mode 100644 index 9e28127151..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go +++ /dev/null @@ -1,2780 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto - -package stats - -import ( - fmt "fmt" - v1 "github.com/containerd/cgroups/stats/v1" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Statistics struct { - // Types that are valid to be assigned to Container: - // *Statistics_Windows - // *Statistics_Linux - Container isStatistics_Container `protobuf_oneof:"container"` - VM *VirtualMachineStatistics `protobuf:"bytes,3,opt,name=vm,proto3" json:"vm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Statistics) Reset() { *m = Statistics{} } -func (*Statistics) ProtoMessage() {} -func (*Statistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{0} -} -func (m *Statistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Statistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Statistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Statistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Statistics.Merge(m, src) -} -func (m *Statistics) XXX_Size() int { - return m.Size() -} -func (m *Statistics) XXX_DiscardUnknown() { - xxx_messageInfo_Statistics.DiscardUnknown(m) -} - -var xxx_messageInfo_Statistics proto.InternalMessageInfo - -type isStatistics_Container interface { - isStatistics_Container() - MarshalTo([]byte) (int, error) - Size() int -} - -type Statistics_Windows struct { - Windows *WindowsContainerStatistics `protobuf:"bytes,1,opt,name=windows,proto3,oneof" json:"windows,omitempty"` -} -type Statistics_Linux struct { - Linux *v1.Metrics `protobuf:"bytes,2,opt,name=linux,proto3,oneof" json:"linux,omitempty"` -} - -func (*Statistics_Windows) isStatistics_Container() {} -func (*Statistics_Linux) isStatistics_Container() {} - -func (m *Statistics) GetContainer() isStatistics_Container { - if m != nil { - return m.Container - } - return nil -} - -func (m *Statistics) GetWindows() *WindowsContainerStatistics { - if x, ok := m.GetContainer().(*Statistics_Windows); ok { - return x.Windows - } - return nil -} - -func (m *Statistics) GetLinux() *v1.Metrics { - if x, ok := m.GetContainer().(*Statistics_Linux); ok { - return x.Linux - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Statistics) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Statistics_Windows)(nil), - (*Statistics_Linux)(nil), - } -} - -type WindowsContainerStatistics struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ContainerStartTime time.Time `protobuf:"bytes,2,opt,name=container_start_time,json=containerStartTime,proto3,stdtime" json:"container_start_time"` - UptimeNS uint64 `protobuf:"varint,3,opt,name=uptime_ns,json=uptimeNs,proto3" json:"uptime_ns,omitempty"` - Processor *WindowsContainerProcessorStatistics `protobuf:"bytes,4,opt,name=processor,proto3" json:"processor,omitempty"` - Memory *WindowsContainerMemoryStatistics `protobuf:"bytes,5,opt,name=memory,proto3" json:"memory,omitempty"` - Storage *WindowsContainerStorageStatistics `protobuf:"bytes,6,opt,name=storage,proto3" json:"storage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerStatistics) Reset() { *m = WindowsContainerStatistics{} } -func (*WindowsContainerStatistics) ProtoMessage() {} -func (*WindowsContainerStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{1} -} -func (m *WindowsContainerStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerStatistics.Merge(m, src) -} -func (m *WindowsContainerStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerStatistics proto.InternalMessageInfo - -type WindowsContainerProcessorStatistics struct { - TotalRuntimeNS uint64 `protobuf:"varint,1,opt,name=total_runtime_ns,json=totalRuntimeNs,proto3" json:"total_runtime_ns,omitempty"` - RuntimeUserNS uint64 `protobuf:"varint,2,opt,name=runtime_user_ns,json=runtimeUserNs,proto3" json:"runtime_user_ns,omitempty"` - RuntimeKernelNS uint64 `protobuf:"varint,3,opt,name=runtime_kernel_ns,json=runtimeKernelNs,proto3" json:"runtime_kernel_ns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerProcessorStatistics) Reset() { *m = WindowsContainerProcessorStatistics{} } -func (*WindowsContainerProcessorStatistics) ProtoMessage() {} -func (*WindowsContainerProcessorStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{2} -} -func (m *WindowsContainerProcessorStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerProcessorStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerProcessorStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerProcessorStatistics) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_WindowsContainerProcessorStatistics.Merge(m, src) -} -func (m *WindowsContainerProcessorStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerProcessorStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerProcessorStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerProcessorStatistics proto.InternalMessageInfo - -type WindowsContainerMemoryStatistics struct { - MemoryUsageCommitBytes uint64 `protobuf:"varint,1,opt,name=memory_usage_commit_bytes,json=memoryUsageCommitBytes,proto3" json:"memory_usage_commit_bytes,omitempty"` - MemoryUsageCommitPeakBytes uint64 `protobuf:"varint,2,opt,name=memory_usage_commit_peak_bytes,json=memoryUsageCommitPeakBytes,proto3" json:"memory_usage_commit_peak_bytes,omitempty"` - MemoryUsagePrivateWorkingSetBytes uint64 `protobuf:"varint,3,opt,name=memory_usage_private_working_set_bytes,json=memoryUsagePrivateWorkingSetBytes,proto3" json:"memory_usage_private_working_set_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerMemoryStatistics) Reset() { *m = WindowsContainerMemoryStatistics{} } -func (*WindowsContainerMemoryStatistics) ProtoMessage() {} -func (*WindowsContainerMemoryStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{3} -} -func (m *WindowsContainerMemoryStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerMemoryStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WindowsContainerMemoryStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerMemoryStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerMemoryStatistics.Merge(m, src) -} -func (m *WindowsContainerMemoryStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerMemoryStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerMemoryStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerMemoryStatistics proto.InternalMessageInfo - -type WindowsContainerStorageStatistics struct { - ReadCountNormalized uint64 `protobuf:"varint,1,opt,name=read_count_normalized,json=readCountNormalized,proto3" json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `protobuf:"varint,2,opt,name=read_size_bytes,json=readSizeBytes,proto3" json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `protobuf:"varint,3,opt,name=write_count_normalized,json=writeCountNormalized,proto3" json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `protobuf:"varint,4,opt,name=write_size_bytes,json=writeSizeBytes,proto3" json:"write_size_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WindowsContainerStorageStatistics) Reset() { *m = WindowsContainerStorageStatistics{} } -func (*WindowsContainerStorageStatistics) ProtoMessage() {} -func (*WindowsContainerStorageStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{4} -} -func (m *WindowsContainerStorageStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WindowsContainerStorageStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { 
- return xxx_messageInfo_WindowsContainerStorageStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WindowsContainerStorageStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_WindowsContainerStorageStatistics.Merge(m, src) -} -func (m *WindowsContainerStorageStatistics) XXX_Size() int { - return m.Size() -} -func (m *WindowsContainerStorageStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_WindowsContainerStorageStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_WindowsContainerStorageStatistics proto.InternalMessageInfo - -type VirtualMachineStatistics struct { - Processor *VirtualMachineProcessorStatistics `protobuf:"bytes,1,opt,name=processor,proto3" json:"processor,omitempty"` - Memory *VirtualMachineMemoryStatistics `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineStatistics) Reset() { *m = VirtualMachineStatistics{} } -func (*VirtualMachineStatistics) ProtoMessage() {} -func (*VirtualMachineStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{5} -} -func (m *VirtualMachineStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineStatistics.Merge(m, src) -} -func (m *VirtualMachineStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineStatistics proto.InternalMessageInfo - -type VirtualMachineProcessorStatistics struct { - TotalRuntimeNS uint64 `protobuf:"varint,1,opt,name=total_runtime_ns,json=totalRuntimeNs,proto3" json:"total_runtime_ns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineProcessorStatistics) Reset() { *m = VirtualMachineProcessorStatistics{} } -func (*VirtualMachineProcessorStatistics) ProtoMessage() {} -func (*VirtualMachineProcessorStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{6} -} -func (m *VirtualMachineProcessorStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineProcessorStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineProcessorStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineProcessorStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineProcessorStatistics.Merge(m, src) -} -func (m *VirtualMachineProcessorStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineProcessorStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineProcessorStatistics.DiscardUnknown(m) -} - -var 
xxx_messageInfo_VirtualMachineProcessorStatistics proto.InternalMessageInfo - -type VirtualMachineMemoryStatistics struct { - WorkingSetBytes uint64 `protobuf:"varint,1,opt,name=working_set_bytes,json=workingSetBytes,proto3" json:"working_set_bytes,omitempty"` - VirtualNodeCount uint32 `protobuf:"varint,2,opt,name=virtual_node_count,json=virtualNodeCount,proto3" json:"virtual_node_count,omitempty"` - VmMemory *VirtualMachineMemory `protobuf:"bytes,3,opt,name=vm_memory,json=vmMemory,proto3" json:"vm_memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineMemoryStatistics) Reset() { *m = VirtualMachineMemoryStatistics{} } -func (*VirtualMachineMemoryStatistics) ProtoMessage() {} -func (*VirtualMachineMemoryStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{7} -} -func (m *VirtualMachineMemoryStatistics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineMemoryStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineMemoryStatistics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VirtualMachineMemoryStatistics) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineMemoryStatistics.Merge(m, src) -} -func (m *VirtualMachineMemoryStatistics) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineMemoryStatistics) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineMemoryStatistics.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineMemoryStatistics proto.InternalMessageInfo - -type VirtualMachineMemory struct { - AvailableMemory int32 `protobuf:"varint,1,opt,name=available_memory,json=availableMemory,proto3" json:"available_memory,omitempty"` - AvailableMemoryBuffer int32 `protobuf:"varint,2,opt,name=available_memory_buffer,json=availableMemoryBuffer,proto3" json:"available_memory_buffer,omitempty"` - ReservedMemory uint64 `protobuf:"varint,3,opt,name=reserved_memory,json=reservedMemory,proto3" json:"reserved_memory,omitempty"` - AssignedMemory uint64 `protobuf:"varint,4,opt,name=assigned_memory,json=assignedMemory,proto3" json:"assigned_memory,omitempty"` - SlpActive bool `protobuf:"varint,5,opt,name=slp_active,json=slpActive,proto3" json:"slp_active,omitempty"` - BalancingEnabled bool `protobuf:"varint,6,opt,name=balancing_enabled,json=balancingEnabled,proto3" json:"balancing_enabled,omitempty"` - DmOperationInProgress bool `protobuf:"varint,7,opt,name=dm_operation_in_progress,json=dmOperationInProgress,proto3" json:"dm_operation_in_progress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VirtualMachineMemory) Reset() { *m = VirtualMachineMemory{} } -func (*VirtualMachineMemory) ProtoMessage() {} -func (*VirtualMachineMemory) Descriptor() ([]byte, []int) { - return fileDescriptor_23217f96da3a05cc, []int{8} -} -func (m *VirtualMachineMemory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VirtualMachineMemory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VirtualMachineMemory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } 
-} -func (m *VirtualMachineMemory) XXX_Merge(src proto.Message) { - xxx_messageInfo_VirtualMachineMemory.Merge(m, src) -} -func (m *VirtualMachineMemory) XXX_Size() int { - return m.Size() -} -func (m *VirtualMachineMemory) XXX_DiscardUnknown() { - xxx_messageInfo_VirtualMachineMemory.DiscardUnknown(m) -} - -var xxx_messageInfo_VirtualMachineMemory proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Statistics)(nil), "containerd.runhcs.stats.v1.Statistics") - proto.RegisterType((*WindowsContainerStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerStatistics") - proto.RegisterType((*WindowsContainerProcessorStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerProcessorStatistics") - proto.RegisterType((*WindowsContainerMemoryStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerMemoryStatistics") - proto.RegisterType((*WindowsContainerStorageStatistics)(nil), "containerd.runhcs.stats.v1.WindowsContainerStorageStatistics") - proto.RegisterType((*VirtualMachineStatistics)(nil), "containerd.runhcs.stats.v1.VirtualMachineStatistics") - proto.RegisterType((*VirtualMachineProcessorStatistics)(nil), "containerd.runhcs.stats.v1.VirtualMachineProcessorStatistics") - proto.RegisterType((*VirtualMachineMemoryStatistics)(nil), "containerd.runhcs.stats.v1.VirtualMachineMemoryStatistics") - proto.RegisterType((*VirtualMachineMemory)(nil), "containerd.runhcs.stats.v1.VirtualMachineMemory") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto", fileDescriptor_23217f96da3a05cc) -} - -var fileDescriptor_23217f96da3a05cc = []byte{ - // 1037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6d, 0x6f, 0xdb, 0x44, - 0x1c, 0x8f, 0xb3, 0x3e, 0x24, 0x37, 0xda, 0xb4, 0xb7, 0x76, 0x84, 0x48, 0x24, 0x6b, 0x90, 0xf6, - 0x00, 0x34, 0xa1, 0xa3, 0x1a, 0x1a, 0x0c, 0x4d, 0xa4, 0x02, 0x0d, 0xb1, 0x84, 0x72, 0xe9, 0x03, - 0x02, 0x21, 0x73, 0xb1, 0xaf, 0xee, 0xa9, 0xb6, 0xcf, 0xba, 0x3b, 0xbb, 0xac, 0xaf, 0xf8, 0x08, - 0x7c, 0xac, 0x22, 0xde, 0xec, 0x25, 0xaf, 0x32, 0x96, 0x6f, 0x80, 0x84, 0x78, 0x3d, 0xf9, 0xee, - 0x9c, 0xb8, 0xed, 0xba, 0xb6, 0xda, 0x9b, 0xc8, 0xfe, 0xff, 0x1e, 0xee, 0xff, 0x70, 0xe7, 0x0b, - 0x78, 0xea, 0x51, 0xb9, 0x1f, 0x0f, 0x5a, 0x0e, 0x0b, 0xda, 0x5d, 0xea, 0x70, 0x26, 0xd8, 0x9e, - 0x6c, 0xef, 0x3b, 0x42, 0xec, 0xd3, 0xa0, 0xed, 0x04, 0x6e, 0xdb, 0x61, 0xa1, 0xc4, 0x34, 0x24, - 0xdc, 0x5d, 0x4d, 0x63, 0xab, 0x3c, 0x0e, 0xf7, 0x1d, 0xb1, 0x9a, 0xac, 0xb5, 0x85, 0xc4, 0x52, - 0xe8, 0xdf, 0x56, 0xc4, 0x99, 0x64, 0xb0, 0x36, 0x21, 0xb7, 0x34, 0xaf, 0xa5, 0xe1, 0x64, 0xad, - 0xb6, 0xe4, 0x31, 0x8f, 0x29, 0x5a, 0x3b, 0x7d, 0xd2, 0x8a, 0x5a, 0xc3, 0x63, 0xcc, 0xf3, 0x49, - 0x5b, 0xbd, 0x0d, 0xe2, 0xbd, 0xb6, 0xa4, 0x01, 0x11, 0x12, 0x07, 0x91, 0x21, 0xac, 0xe7, 0x12, - 0x9c, 0xb8, 0xb7, 0x1d, 0x8f, 0xb3, 0x38, 0x32, 0xab, 0xb7, 0x93, 0xb5, 0x76, 0x40, 0x24, 0xa7, - 0x8e, 0x49, 0xa4, 0xf9, 0xbf, 0x05, 0x40, 0x5f, 0x62, 0x49, 0x85, 0xa4, 0x8e, 0x80, 0x08, 0xcc, - 0x1e, 0xd2, 0xd0, 0x65, 0x87, 0xa2, 0x6a, 0xdd, 0xb2, 0xee, 0x5e, 0xbf, 0xff, 0xa0, 0x75, 0x7e, - 0xa6, 0xad, 0x5d, 0x4d, 0xdd, 0xc8, 0x18, 0x13, 0xa3, 0x27, 0x05, 0x94, 0x19, 0xc1, 0x87, 0x60, - 0xda, 0xa7, 0x61, 0xfc, 0x5b, 0xb5, 0xa8, 0x1c, 0x57, 0x5a, 0x94, 0xe5, 0x4d, 0x4d, 0x82, 0xa9, - 0x5f, 0x57, 0xa7, 0xf6, 0xa4, 0x80, 0xb4, 0x02, 0x3e, 0x05, 0xc5, 0x24, 0xa8, 0x5e, 0x53, 0xba, - 0xf5, 0x37, 0x65, 0xb2, 0x43, 0xb9, 0x8c, 0xb1, 0xdf, 0xc5, 0xce, 0x3e, 0x0d, 0xc9, 0x24, 0x8f, 
- 0xce, 0xcc, 0x68, 0xd8, 0x28, 0xee, 0x74, 0x51, 0x31, 0x09, 0x3a, 0xd7, 0x41, 0x79, 0x6c, 0xd1, - 0xfc, 0xf7, 0x1a, 0xa8, 0x9d, 0x9f, 0x3f, 0xec, 0x80, 0xf2, 0xb8, 0xc1, 0xa6, 0x15, 0xb5, 0x96, - 0x1e, 0x41, 0x2b, 0x1b, 0x41, 0x6b, 0x2b, 0x63, 0x74, 0x4a, 0xc7, 0xc3, 0x46, 0xe1, 0x8f, 0x17, - 0x0d, 0x0b, 0x4d, 0x64, 0x70, 0x07, 0x2c, 0x8d, 0xd7, 0xb3, 0x85, 0xc4, 0x5c, 0xda, 0x29, 0x68, - 0xfa, 0x70, 0x39, 0x3b, 0xe8, 0xe4, 0x92, 0xe3, 0x32, 0xa5, 0xc0, 0x7b, 0xa0, 0x1c, 0x47, 0xa9, - 0x93, 0x1d, 0x0a, 0xd5, 0x9c, 0xa9, 0xce, 0x3b, 0xa3, 0x61, 0xa3, 0xb4, 0xad, 0x82, 0xbd, 0x3e, - 0x2a, 0x69, 0xb8, 0x27, 0xe0, 0x2f, 0xa0, 0x1c, 0x71, 0xe6, 0x10, 0x21, 0x18, 0xaf, 0x4e, 0xa9, - 0x75, 0x1f, 0x5f, 0x65, 0xa2, 0x9b, 0x99, 0x78, 0xd2, 0x1a, 0x34, 0x71, 0x84, 0x5b, 0x60, 0x26, - 0x20, 0x01, 0xe3, 0xcf, 0xaa, 0xd3, 0xca, 0xfb, 0xd1, 0x55, 0xbc, 0xbb, 0x4a, 0x99, 0x33, 0x36, - 0x5e, 0x70, 0x17, 0xcc, 0x0a, 0xc9, 0x38, 0xf6, 0x48, 0x75, 0x46, 0xd9, 0x7e, 0x79, 0xb5, 0x4d, - 0xa8, 0xa4, 0x39, 0xdf, 0xcc, 0xad, 0xf9, 0xc2, 0x02, 0x1f, 0x5c, 0xa2, 0x42, 0xf8, 0x08, 0x2c, - 0x48, 0x26, 0xb1, 0x6f, 0xf3, 0x38, 0xcc, 0xfa, 0x6c, 0xa9, 0x3e, 0xc3, 0xd1, 0xb0, 0x31, 0xbf, - 0x95, 0x62, 0x48, 0x43, 0xbd, 0x3e, 0x9a, 0x97, 0xf9, 0xf7, 0x74, 0xbf, 0x57, 0x32, 0x5d, 0x2c, - 0x08, 0x4f, 0xc5, 0x45, 0x25, 0x5e, 0x1c, 0x0d, 0x1b, 0x73, 0x86, 0xb7, 0x2d, 0x08, 0xef, 0xf5, - 0xd1, 0x1c, 0xcf, 0xbd, 0x0a, 0xf8, 0x18, 0x2c, 0x66, 0xd2, 0x03, 0xc2, 0x43, 0xe2, 0x4f, 0x26, - 0x7c, 0x63, 0x34, 0x6c, 0x54, 0x8c, 0xf8, 0x3b, 0x85, 0xf5, 0xfa, 0x28, 0x5b, 0xc8, 0x04, 0x44, - 0xf3, 0x3f, 0x0b, 0xdc, 0xba, 0xa8, 0xcf, 0xf0, 0x21, 0x78, 0x4f, 0x77, 0xda, 0x8e, 0x05, 0xf6, - 0x88, 0xed, 0xb0, 0x20, 0xa0, 0xd2, 0x1e, 0x3c, 0x93, 0xc4, 0xd4, 0x89, 0x6e, 0x6a, 0xc2, 0x76, - 0x8a, 0x6f, 0x28, 0xb8, 0x93, 0xa2, 0xb0, 0x03, 0xea, 0xaf, 0x93, 0x46, 0x04, 0x1f, 0x18, 0xbd, - 0x2a, 0x15, 0xd5, 0xce, 0xe8, 0x37, 0x09, 0x3e, 0xd0, 0x1e, 0x3f, 0x80, 0xdb, 0x27, 0x3c, 0x22, - 0x4e, 0x13, 0x2c, 0x89, 0x7d, 0xc8, 0xf8, 0x01, 0x0d, 0x3d, 0x5b, 0x90, 0x2c, 0x17, 0x55, 0x39, - 0x5a, 0xc9, 0x79, 0x6d, 0x6a, 0xee, 0xae, 0xa6, 0xf6, 0x89, 0x4e, 0x2b, 0x1d, 0xec, 0xca, 0x85, - 0xfb, 0x00, 0xde, 0x07, 0xcb, 0x9c, 0x60, 0xd7, 0x76, 0x58, 0x1c, 0x4a, 0x3b, 0x64, 0x3c, 0xc0, - 0x3e, 0x3d, 0x22, 0xae, 0xa9, 0xf9, 0x46, 0x0a, 0x6e, 0xa4, 0x58, 0x6f, 0x0c, 0xc1, 0xdb, 0xa0, - 0xa2, 0x34, 0x82, 0x1e, 0x91, 0x13, 0x15, 0xce, 0xa5, 0xe1, 0x3e, 0x3d, 0x22, 0xba, 0xa8, 0x75, - 0x70, 0xf3, 0x90, 0x53, 0x49, 0xce, 0x9a, 0xeb, 0x22, 0x96, 0x14, 0x7a, 0xda, 0xfd, 0x2e, 0x58, - 0xd0, 0xaa, 0x9c, 0xfd, 0x94, 0xe2, 0xcf, 0xab, 0xf8, 0xd8, 0xbf, 0xf9, 0x97, 0x05, 0xaa, 0xe7, - 0x7d, 0xe4, 0xe0, 0xcf, 0xf9, 0x53, 0x6e, 0x5d, 0x7c, 0x64, 0x4e, 0x1a, 0x5d, 0x70, 0xc6, 0xd1, - 0xf8, 0x8c, 0xeb, 0xef, 0xd6, 0xe7, 0x97, 0x77, 0x3e, 0xef, 0x84, 0x37, 0x31, 0x58, 0xb9, 0x30, - 0x87, 0xb7, 0x3b, 0x85, 0xcd, 0x3f, 0x2d, 0x50, 0x7f, 0x73, 0x36, 0xf0, 0x43, 0xb0, 0x78, 0x76, - 0xcf, 0xe9, 0xbd, 0x50, 0x39, 0x3c, 0xb9, 0xc3, 0xe0, 0xc7, 0x00, 0x26, 0xda, 0xcd, 0x0e, 0x99, - 0x6b, 0xc6, 0xac, 0x3a, 0x32, 0x87, 0x16, 0x0c, 0xd2, 0x63, 0xae, 0x9e, 0x30, 0xec, 0x82, 0x72, - 0x12, 0xd8, 0xa6, 0x6d, 0xfa, 0xfa, 0xfa, 0xe4, 0xaa, 0x6d, 0x43, 0xa5, 0x24, 0xd0, 0x4f, 0xcd, - 0xe7, 0x45, 0xb0, 0xf4, 0x3a, 0x0a, 0xbc, 0x07, 0x16, 0x70, 0x82, 0xa9, 0x8f, 0x07, 0x3e, 0xc9, - 0x96, 0x4b, 0x0b, 0x98, 0x46, 0x95, 0x71, 0xdc, 0x50, 0x1f, 0x80, 0x77, 0x4f, 0x53, 0xed, 0x41, - 0xbc, 0xb7, 0x47, 0xb8, 0xaa, 0x62, 0x1a, 0x2d, 0x9f, 0x52, 0x74, 0x14, 0x08, 0xef, 0xa4, 0x07, - 0x40, 0x10, 0x9e, 0x10, 
0x37, 0x5f, 0xd0, 0x14, 0x9a, 0xcf, 0xc2, 0x66, 0x81, 0x3b, 0xa0, 0x82, - 0x85, 0xa0, 0x5e, 0x38, 0x21, 0x9a, 0xad, 0x9c, 0x85, 0x0d, 0xf1, 0x7d, 0x00, 0x84, 0x1f, 0xd9, - 0xd8, 0x91, 0x34, 0x21, 0xea, 0xe2, 0x28, 0xa1, 0xb2, 0xf0, 0xa3, 0xaf, 0x54, 0x00, 0x7e, 0x04, - 0x16, 0x07, 0xd8, 0xc7, 0xa1, 0x93, 0xce, 0x85, 0x84, 0x69, 0x42, 0xae, 0xba, 0x07, 0x4a, 0x68, - 0x61, 0x0c, 0x7c, 0xad, 0xe3, 0xf0, 0x33, 0x50, 0x75, 0x03, 0x9b, 0x45, 0x84, 0x63, 0x49, 0x59, - 0x68, 0xd3, 0xd0, 0x8e, 0x38, 0xf3, 0x38, 0x11, 0xa2, 0x3a, 0xab, 0x34, 0xcb, 0x6e, 0xf0, 0x7d, - 0x06, 0x7f, 0x1b, 0x6e, 0x1a, 0xb0, 0xf3, 0xeb, 0xf1, 0xcb, 0x7a, 0xe1, 0xef, 0x97, 0xf5, 0xc2, - 0xef, 0xa3, 0xba, 0x75, 0x3c, 0xaa, 0x5b, 0xcf, 0x47, 0x75, 0xeb, 0x9f, 0x51, 0xdd, 0xfa, 0xe9, - 0x9b, 0xb7, 0xfd, 0xa3, 0xf7, 0x85, 0xfa, 0xfd, 0xb1, 0x30, 0x98, 0x51, 0x37, 0xfb, 0xa7, 0xaf, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xe8, 0x0f, 0x6d, 0x3b, 0x0a, 0x00, 0x00, -} - -func (m *Statistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Statistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Statistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.VM != nil { - { - size, err := m.VM.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Container != nil { - { - size := m.Container.Size() - i -= size - if _, err := m.Container.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Statistics_Windows) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Statistics_Windows) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Windows != nil { - { - size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *Statistics_Linux) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Statistics_Linux) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Linux != nil { - { - size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *WindowsContainerStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WindowsContainerStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Storage != nil { - { - size, err := m.Storage.MarshalToSizedBuffer(dAtA[:i]) 
- if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Processor != nil { - { - size, err := m.Processor.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.UptimeNS != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.UptimeNS)) - i-- - dAtA[i] = 0x18 - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ContainerStartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ContainerStartTime):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintStats(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintStats(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WindowsContainerProcessorStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerProcessorStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WindowsContainerProcessorStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RuntimeKernelNS != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.RuntimeKernelNS)) - i-- - dAtA[i] = 0x18 - } - if m.RuntimeUserNS != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.RuntimeUserNS)) - i-- - dAtA[i] = 0x10 - } - if m.TotalRuntimeNS != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.TotalRuntimeNS)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WindowsContainerMemoryStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerMemoryStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WindowsContainerMemoryStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemoryUsagePrivateWorkingSetBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsagePrivateWorkingSetBytes)) - i-- - dAtA[i] = 0x18 - } - if m.MemoryUsageCommitPeakBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsageCommitPeakBytes)) - i-- - dAtA[i] = 0x10 - } - if m.MemoryUsageCommitBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.MemoryUsageCommitBytes)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WindowsContainerStorageStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, 
err - } - return dAtA[:n], nil -} - -func (m *WindowsContainerStorageStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WindowsContainerStorageStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.WriteSizeBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.WriteSizeBytes)) - i-- - dAtA[i] = 0x20 - } - if m.WriteCountNormalized != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.WriteCountNormalized)) - i-- - dAtA[i] = 0x18 - } - if m.ReadSizeBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.ReadSizeBytes)) - i-- - dAtA[i] = 0x10 - } - if m.ReadCountNormalized != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.ReadCountNormalized)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *VirtualMachineStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VirtualMachineStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Processor != nil { - { - size, err := m.Processor.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VirtualMachineProcessorStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineProcessorStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VirtualMachineProcessorStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TotalRuntimeNS != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.TotalRuntimeNS)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *VirtualMachineMemoryStatistics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineMemoryStatistics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VirtualMachineMemoryStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.VmMemory != nil { - { - size, err := m.VmMemory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintStats(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.VirtualNodeCount != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.VirtualNodeCount)) - i-- - dAtA[i] = 0x10 - } - if m.WorkingSetBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.WorkingSetBytes)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *VirtualMachineMemory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VirtualMachineMemory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VirtualMachineMemory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.DmOperationInProgress { - i-- - if m.DmOperationInProgress { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.BalancingEnabled { - i-- - if m.BalancingEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.SlpActive { - i-- - if m.SlpActive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.AssignedMemory != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.AssignedMemory)) - i-- - dAtA[i] = 0x20 - } - if m.ReservedMemory != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.ReservedMemory)) - i-- - dAtA[i] = 0x18 - } - if m.AvailableMemoryBuffer != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.AvailableMemoryBuffer)) - i-- - dAtA[i] = 0x10 - } - if m.AvailableMemory != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.AvailableMemory)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintStats(dAtA []byte, offset int, v uint64) int { - offset -= sovStats(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Statistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Container != nil { - n += m.Container.Size() - } - if m.VM != nil { - l = m.VM.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Statistics_Windows) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Windows != nil { - l = m.Windows.Size() - n += 1 + l + sovStats(uint64(l)) - } - return n -} -func (m *Statistics_Linux) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Linux != nil { - l = m.Linux.Size() - n += 1 + l + sovStats(uint64(l)) - } - return n -} -func (m *WindowsContainerStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovStats(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ContainerStartTime) - n += 1 + l + sovStats(uint64(l)) - if m.UptimeNS != 0 { - n += 1 + sovStats(uint64(m.UptimeNS)) - } - if m.Processor != nil { - l = m.Processor.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Storage != nil { - l = m.Storage.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WindowsContainerProcessorStatistics) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TotalRuntimeNS != 0 { - n += 1 + sovStats(uint64(m.TotalRuntimeNS)) - } - if m.RuntimeUserNS != 0 { - n += 1 + sovStats(uint64(m.RuntimeUserNS)) - } - if m.RuntimeKernelNS != 0 { - n += 1 + sovStats(uint64(m.RuntimeKernelNS)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WindowsContainerMemoryStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MemoryUsageCommitBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsageCommitBytes)) - } - if m.MemoryUsageCommitPeakBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsageCommitPeakBytes)) - } - if m.MemoryUsagePrivateWorkingSetBytes != 0 { - n += 1 + sovStats(uint64(m.MemoryUsagePrivateWorkingSetBytes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WindowsContainerStorageStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ReadCountNormalized != 0 { - n += 1 + sovStats(uint64(m.ReadCountNormalized)) - } - if m.ReadSizeBytes != 0 { - n += 1 + sovStats(uint64(m.ReadSizeBytes)) - } - if m.WriteCountNormalized != 0 { - n += 1 + sovStats(uint64(m.WriteCountNormalized)) - } - if m.WriteSizeBytes != 0 { - n += 1 + sovStats(uint64(m.WriteSizeBytes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Processor != nil { - l = m.Processor.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineProcessorStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TotalRuntimeNS != 0 { - n += 1 + sovStats(uint64(m.TotalRuntimeNS)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineMemoryStatistics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.WorkingSetBytes != 0 { - n += 1 + sovStats(uint64(m.WorkingSetBytes)) - } - if m.VirtualNodeCount != 0 { - n += 1 + sovStats(uint64(m.VirtualNodeCount)) - } - if m.VmMemory != nil { - l = m.VmMemory.Size() - n += 1 + l + sovStats(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VirtualMachineMemory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AvailableMemory != 0 { - n += 1 + sovStats(uint64(m.AvailableMemory)) - } - if m.AvailableMemoryBuffer != 0 { - n += 1 + sovStats(uint64(m.AvailableMemoryBuffer)) - } - if m.ReservedMemory != 0 { - n += 1 + sovStats(uint64(m.ReservedMemory)) - } - if m.AssignedMemory != 0 { - n += 1 + sovStats(uint64(m.AssignedMemory)) - } - if m.SlpActive { - n += 2 - } - if m.BalancingEnabled { - n += 2 - } - if m.DmOperationInProgress { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovStats(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStats(x uint64) (n int) { - return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Statistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `VM:` + strings.Replace(this.VM.String(), 
"VirtualMachineStatistics", "VirtualMachineStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Statistics_Windows) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics_Windows{`, - `Windows:` + strings.Replace(fmt.Sprintf("%v", this.Windows), "WindowsContainerStatistics", "WindowsContainerStatistics", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Statistics_Linux) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Statistics_Linux{`, - `Linux:` + strings.Replace(fmt.Sprintf("%v", this.Linux), "Metrics", "v1.Metrics", 1) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerStatistics{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `ContainerStartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStartTime), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UptimeNS:` + fmt.Sprintf("%v", this.UptimeNS) + `,`, - `Processor:` + strings.Replace(this.Processor.String(), "WindowsContainerProcessorStatistics", "WindowsContainerProcessorStatistics", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "WindowsContainerMemoryStatistics", "WindowsContainerMemoryStatistics", 1) + `,`, - `Storage:` + strings.Replace(this.Storage.String(), "WindowsContainerStorageStatistics", "WindowsContainerStorageStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerProcessorStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerProcessorStatistics{`, - `TotalRuntimeNS:` + fmt.Sprintf("%v", this.TotalRuntimeNS) + `,`, - `RuntimeUserNS:` + fmt.Sprintf("%v", this.RuntimeUserNS) + `,`, - `RuntimeKernelNS:` + fmt.Sprintf("%v", this.RuntimeKernelNS) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerMemoryStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerMemoryStatistics{`, - `MemoryUsageCommitBytes:` + fmt.Sprintf("%v", this.MemoryUsageCommitBytes) + `,`, - `MemoryUsageCommitPeakBytes:` + fmt.Sprintf("%v", this.MemoryUsageCommitPeakBytes) + `,`, - `MemoryUsagePrivateWorkingSetBytes:` + fmt.Sprintf("%v", this.MemoryUsagePrivateWorkingSetBytes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsContainerStorageStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsContainerStorageStatistics{`, - `ReadCountNormalized:` + fmt.Sprintf("%v", this.ReadCountNormalized) + `,`, - `ReadSizeBytes:` + fmt.Sprintf("%v", this.ReadSizeBytes) + `,`, - `WriteCountNormalized:` + fmt.Sprintf("%v", this.WriteCountNormalized) + `,`, - `WriteSizeBytes:` + fmt.Sprintf("%v", this.WriteSizeBytes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineStatistics{`, - `Processor:` + 
strings.Replace(this.Processor.String(), "VirtualMachineProcessorStatistics", "VirtualMachineProcessorStatistics", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "VirtualMachineMemoryStatistics", "VirtualMachineMemoryStatistics", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineProcessorStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineProcessorStatistics{`, - `TotalRuntimeNS:` + fmt.Sprintf("%v", this.TotalRuntimeNS) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineMemoryStatistics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineMemoryStatistics{`, - `WorkingSetBytes:` + fmt.Sprintf("%v", this.WorkingSetBytes) + `,`, - `VirtualNodeCount:` + fmt.Sprintf("%v", this.VirtualNodeCount) + `,`, - `VmMemory:` + strings.Replace(this.VmMemory.String(), "VirtualMachineMemory", "VirtualMachineMemory", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *VirtualMachineMemory) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VirtualMachineMemory{`, - `AvailableMemory:` + fmt.Sprintf("%v", this.AvailableMemory) + `,`, - `AvailableMemoryBuffer:` + fmt.Sprintf("%v", this.AvailableMemoryBuffer) + `,`, - `ReservedMemory:` + fmt.Sprintf("%v", this.ReservedMemory) + `,`, - `AssignedMemory:` + fmt.Sprintf("%v", this.AssignedMemory) + `,`, - `SlpActive:` + fmt.Sprintf("%v", this.SlpActive) + `,`, - `BalancingEnabled:` + fmt.Sprintf("%v", this.BalancingEnabled) + `,`, - `DmOperationInProgress:` + fmt.Sprintf("%v", this.DmOperationInProgress) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringStats(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Statistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Statistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Statistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WindowsContainerStatistics{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Container = &Statistics_Windows{v} - iNdEx = 
postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &v1.Metrics{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Container = &Statistics_Linux{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VM", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VM == nil { - m.VM = &VirtualMachineStatistics{} - } - if err := m.VM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerStartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ContainerStartTime, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UptimeNS", wireType) - } - m.UptimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UptimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Processor == nil { - m.Processor = &WindowsContainerProcessorStatistics{} - } - if err := m.Processor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &WindowsContainerMemoryStatistics{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Storage == nil { - m.Storage = &WindowsContainerStorageStatistics{} - } - if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerProcessorStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerProcessorStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerProcessorStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRuntimeNS", wireType) - } - m.TotalRuntimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRuntimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeUserNS", wireType) - } - m.RuntimeUserNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RuntimeUserNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeKernelNS", wireType) - } - m.RuntimeKernelNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RuntimeKernelNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerMemoryStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerMemoryStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerMemoryStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsageCommitBytes", wireType) - } - m.MemoryUsageCommitBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsageCommitBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsageCommitPeakBytes", wireType) - } - m.MemoryUsageCommitPeakBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsageCommitPeakBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryUsagePrivateWorkingSetBytes", wireType) - } - m.MemoryUsagePrivateWorkingSetBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryUsagePrivateWorkingSetBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsContainerStorageStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsContainerStorageStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsContainerStorageStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadCountNormalized", wireType) - } - m.ReadCountNormalized = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadCountNormalized |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadSizeBytes", wireType) - } - m.ReadSizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadSizeBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WriteCountNormalized", wireType) - } - m.WriteCountNormalized = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WriteCountNormalized |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WriteSizeBytes", wireType) - } - m.WriteSizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WriteSizeBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Processor == nil { - m.Processor = &VirtualMachineProcessorStatistics{} - } - if err := m.Processor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &VirtualMachineMemoryStatistics{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineProcessorStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineProcessorStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineProcessorStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRuntimeNS", wireType) - } - m.TotalRuntimeNS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRuntimeNS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineMemoryStatistics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineMemoryStatistics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineMemoryStatistics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingSetBytes", wireType) - } - m.WorkingSetBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkingSetBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualNodeCount", wireType) - } - m.VirtualNodeCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VirtualNodeCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VmMemory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VmMemory == nil { - m.VmMemory = &VirtualMachineMemory{} - } - if err := m.VmMemory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VirtualMachineMemory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VirtualMachineMemory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VirtualMachineMemory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableMemory", wireType) - } - m.AvailableMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableMemory |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableMemoryBuffer", wireType) - } - m.AvailableMemoryBuffer = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableMemoryBuffer |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReservedMemory", wireType) - } - m.ReservedMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReservedMemory |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AssignedMemory", wireType) - } - m.AssignedMemory = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AssignedMemory |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SlpActive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SlpActive = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BalancingEnabled", 
wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BalancingEnabled = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DmOperationInProgress", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DmOperationInProgress = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStats(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStats - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStats - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStats = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go deleted file mode 100644 index 54c4b3bc4a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// AttachLayerStorageFilter sets up the layer storage filter on a writable -// container layer. -// -// `layerPath` is a path to a directory the writable layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. 
-func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim::AttachLayerStorageFilter" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to attach layer storage filter") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go deleted file mode 100644 index 5058d3b55e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DestroyLayer deletes a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -func DestroyLayer(ctx context.Context, layerPath string) (err error) { - title := "hcsshim::DestroyLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDestroyLayer(layerPath) - if err != nil { - return errors.Wrap(err, "failed to destroy layer") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go deleted file mode 100644 index daf1bfff20..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { - title := "hcsshim::DetachLayerStorageFilter" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDetachLayerStorageFilter(layerPath) - if err != nil { - return errors.Wrap(err, "failed to detach layer storage filter") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/export.go deleted file mode 100644 index c6370a5c9a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/export.go +++ /dev/null @@ -1,48 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ExportLayer exports a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -// -// `exportFolderPath` is a pre-existing folder to export the layer to. -// -// `layerData` is the parent layer data. 
-// -// `options` are the export options applied to the exported layer. -func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { - title := "hcsshim::ExportLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("exportFolderPath", exportFolderPath), - ) - - ldBytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - oBytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes)) - if err != nil { - return errors.Wrap(err, "failed to export layer") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/format.go deleted file mode 100644 index 4a5735e989..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/format.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "os" - "syscall" - - "github.com/Microsoft/go-winio/vhd" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -func openDisk(path string) (windows.Handle, error) { - u16, err := windows.UTF16PtrFromString(path) - if err != nil { - return 0, err - } - h, err := windows.CreateFile( - u16, - windows.GENERIC_READ|windows.GENERIC_WRITE, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE, - nil, - windows.OPEN_EXISTING, - windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_NO_BUFFERING, - 0, - ) - if err != nil { - return 0, &os.PathError{ - Op: "CreateFile", - Path: path, - Err: err, - } - } - return h, nil -} - -// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. -// -// If the VHD is not mounted it will be temporarily mounted. -func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { - title := "hcsshim::FormatWritableLayerVhd" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - h := vhdHandle - // On RS5 HcsFormatWritableLayerVhd expects to receive a disk handle instead of a vhd handle. 
- if osversion.Build() < osversion.V19H1 { - if err := vhd.AttachVirtualDisk(syscall.Handle(vhdHandle), vhd.AttachVirtualDiskFlagNone, &vhd.AttachVirtualDiskParameters{Version: 1}); err != nil { - return err - } - defer func() { - if detachErr := vhd.DetachVirtualDisk(syscall.Handle(vhdHandle)); err == nil && detachErr != nil { - err = detachErr - } - }() - diskPath, err := vhd.GetVirtualDiskPhysicalPath(syscall.Handle(vhdHandle)) - if err != nil { - return err - } - diskHandle, err := openDisk(diskPath) - if err != nil { - return err - } - defer windows.CloseHandle(diskHandle) // nolint: errcheck - h = diskHandle - } - err = hcsFormatWritableLayerVhd(h) - if err != nil { - return errors.Wrap(err, "failed to format writable layer vhd") - } - return -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go deleted file mode 100644 index c3608dcec8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio/vhd" - "github.com/Microsoft/hcsshim/internal/memory" - "github.com/pkg/errors" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/internal/security" -) - -const defaultVHDXBlockSizeInMB = 1 - -// SetupContainerBaseLayer is a helper to setup a containers scratch. It -// will create and format the vhdx's inside and the size is configurable with the sizeInGB -// parameter. -// -// `layerPath` is the path to the base container layer on disk. -// -// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. -// -// `diffVhdPath` is the path where the differencing disk for the base layer should be created. -// -// `sizeInGB` is the size in gigabytes to make the base vhdx. -func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - var ( - hivesPath = filepath.Join(layerPath, "Hives") - layoutPath = filepath.Join(layerPath, "Layout") - ) - - // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files - // already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and - // differencing disks if they exist in case we're asking for a different size. 
- if _, err := os.Stat(hivesPath); err == nil { - if err := os.RemoveAll(hivesPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting hives directory") - } - } - if _, err := os.Stat(layoutPath); err == nil { - if err := os.RemoveAll(layoutPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting layout file") - } - } - - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx path") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * memory.GiB, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { - return err - } - // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - options := OsLayerOptions{ - Type: OsLayerTypeContainer, - } - - // SetupBaseOSLayer expects an empty vhd handle for a container layer and will - // error out otherwise. - if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { - return err - } - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. - if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} - -// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format -// the vhdx inside and the size is configurable by the sizeInGB parameter. -// -// `uvmPath` is the path to the UtilityVM filesystem. -// -// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. -// -// `diffVhdPath` is the path where the differencing disk for the UVM should be created. -// -// `sizeInGB` specifies the size in gigabytes to make the base vhdx. -func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - // Remove the base and differencing disks if they exist in case we're asking for a different size. - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - // Just create the vhdx for utilityVM layer, no need to format it. 
- createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * memory.GiB, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - // If it is a UtilityVM layer then the base vhdx must be attached when calling - // `SetupBaseOSLayer` - attachParams := &vhd.AttachVirtualDiskParameters{ - Version: 2, - } - if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { - return errors.Wrapf(err, "failed to attach virtual disk") - } - - options := OsLayerOptions{ - Type: OsLayerTypeVM, - } - if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { - return err - } - - // Detach and close the handle after setting up the layer as we don't need the handle - // for anything else and we no longer need to be attached either. - if err = vhd.DetachVirtualDisk(handle); err != nil { - return errors.Wrap(err, "failed to detach vhdx") - } - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. - if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/import.go deleted file mode 100644 index e1c87416a3..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/import.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ImportLayer imports a container layer. -// -// `layerPath` is a path to a directory to import the layer to. If the directory -// does not exist it will be automatically created. -// -// `sourceFolderpath` is a pre-existing folder that contains the layer to -// import. -// -// `layerData` is the parent layer data. 
-func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { - title := "hcsshim::ImportLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("sourceFolderPath", sourceFolderPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to import layer") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go deleted file mode 100644 index d0c6216056..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// InitializeWritableLayer initializes a writable layer for a container. -// -// `layerPath` is a path to a directory the layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. -func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim::InitializeWritableLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - // Options are not used in the platform as of RS5 - err = hcsInitializeWritableLayer(layerPath, string(bytes), "") - if err != nil { - return errors.Wrap(err, "failed to intitialize container layer") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go deleted file mode 100644 index 4f4d8ebf2f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
-func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { - title := "hcsshim::GetLayerVhdMountPath" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var mountPath *uint16 - err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) - if err != nil { - return "", errors.Wrap(err, "failed to get vhd mount path") - } - path = interop.ConvertAndFreeCoTaskMemString(mountPath) - return path, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/test/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go deleted file mode 100644 index faec837ca1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build windows - -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// SetupBaseOSLayer sets up a layer that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` -// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type -// == OsLayerTypeVm`. -// -// `options` are the options applied while processing the layer. -func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { - title := "hcsshim::SetupBaseOSLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} - -// SetupBaseOSVolume sets up a volume that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `volumePath` is the path to the volume to be used for setup. -// -// `options` are the options applied while processing the layer. 
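The utility-VM flavour of SetupBaseOSLayer (attach the VHDX first and pass its handle) is exercised by the VHDX setup code earlier in this diff; per the doc comment above, the container flavour passes an empty handle instead. A minimal sketch of that case, with an invented layer path:

    //go:build windows

    package main

    import (
        "context"
        "log"

        "github.com/Microsoft/hcsshim/computestorage"
        "golang.org/x/sys/windows"
    )

    func main() {
        ctx := context.Background()

        // A container-type base layer does not need an attached VHDX, so a
        // zero handle is passed.
        opts := computestorage.OsLayerOptions{Type: computestorage.OsLayerTypeContainer}
        if err := computestorage.SetupBaseOSLayer(ctx, `C:\layers\base`, windows.Handle(0), opts); err != nil {
            log.Fatal(err)
        }
    }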
-func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { - if osversion.Build() < 19645 { - return errors.New("SetupBaseOSVolume is not present on builds older than 19645") - } - title := "hcsshim::SetupBaseOSVolume" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("volumePath", volumePath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/container.go b/test/vendor/github.com/Microsoft/hcsshim/container.go deleted file mode 100644 index c8f09f88b9..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/container.go +++ /dev/null @@ -1,225 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "context" - "fmt" - "os" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - "github.com/Microsoft/hcsshim/internal/mergemaps" -) - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties = schema1.ContainerProperties - -// MemoryStats holds the memory statistics for a container -type MemoryStats = schema1.MemoryStats - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats = schema1.ProcessorStats - -// StorageStats holds the storage statistics for a container -type StorageStats = schema1.StorageStats - -// NetworkStats holds the network statistics for a container -type NetworkStats = schema1.NetworkStats - -// Statistics is the structure returned by a statistics call on a container -type Statistics = schema1.Statistics - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem = schema1.ProcessListItem - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController = schema1.MappedVirtualDiskController - -// Type of Request Support in ModifySystem -type RequestType = schema1.RequestType - -// Type of Resource Support in ModifySystem -type ResourceType = schema1.ResourceType - -// RequestType const -const ( - Add = schema1.Add - Remove = schema1.Remove - Network = schema1.Network -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse - -type container struct { - system *hcs.System - waitOnce sync.Once - waitErr error - waitCh chan struct{} -} - -// createContainerAdditionalJSON is read from the environment at initialization -// time. It allows an environment variable to define additional JSON which -// is merged in the CreateComputeSystem call to HCS. -var createContainerAdditionalJSON []byte - -func init() { - createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) -} - -// CreateContainer creates a new container with the given configuration but does not start it. 
-func CreateContainer(id string, c *ContainerConfig) (Container, error) { - fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) - if err != nil { - return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) - } - - system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// OpenContainer opens an existing container by ID. -func OpenContainer(id string) (Container, error) { - system, err := hcs.OpenComputeSystem(context.Background(), id) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// GetContainers gets a list of the containers on the system that match the query -func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { - return hcs.GetComputeSystems(context.Background(), q) -} - -// Start synchronously starts the container. -func (container *container) Start() error { - return convertSystemError(container.system.Start(context.Background()), container) -} - -// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. -func (container *container) Shutdown() error { - err := container.system.Shutdown(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} -} - -// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. -func (container *container) Terminate() error { - err := container.system.Terminate(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"} -} - -// Waits synchronously waits for the container to shutdown or terminate. -func (container *container) Wait() error { - err := container.system.Wait() - if err == nil { - err = container.system.ExitError() - } - return convertSystemError(err, container) -} - -// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It -// returns false if timeout occurs. -func (container *container) WaitTimeout(timeout time.Duration) error { - container.waitOnce.Do(func() { - container.waitCh = make(chan struct{}) - go func() { - container.waitErr = container.Wait() - close(container.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"} - case <-container.waitCh: - return container.waitErr - } -} - -// Pause pauses the execution of a container. -func (container *container) Pause() error { - return convertSystemError(container.system.Pause(context.Background()), container) -} - -// Resume resumes the execution of a container. -func (container *container) Resume() error { - return convertSystemError(container.system.Resume(context.Background()), container) -} - -// HasPendingUpdates returns true if the container has updates pending to install -func (container *container) HasPendingUpdates() (bool, error) { - return false, nil -} - -// Statistics returns statistics for the container. 
This is a legacy v1 call -func (container *container) Statistics() (Statistics, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics) - if err != nil { - return Statistics{}, convertSystemError(err, container) - } - - return properties.Statistics, nil -} - -// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call -func (container *container) ProcessList() ([]ProcessListItem, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.ProcessList, nil -} - -// This is a legacy v1 call -func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.MappedVirtualDiskControllers, nil -} - -// CreateProcess launches a new process within the container. -func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { - p, err := container.system.CreateProcess(context.Background(), c) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p.(*hcs.Process)}, nil -} - -// OpenProcess gets an interface to an existing process within the container. -func (container *container) OpenProcess(pid int) (Process, error) { - p, err := container.system.OpenProcess(context.Background(), pid) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p}, nil -} - -// Close cleans up any state associated with the container but does not terminate or wait for it. 
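Taken together, the wrapper methods above give the legacy v1 create/start/stop flow. The sketch below shows its shape; the id and config fields are minimal, invented values. Note that Shutdown reports an accepted request as a pending error, which is why IsPending is checked before waiting:

    //go:build windows

    package main

    import (
        "log"

        "github.com/Microsoft/hcsshim"
    )

    func main() {
        // Skeletal schema1 config; real callers fill in layers, networking,
        // and the rest of the fields.
        cfg := &hcsshim.ContainerConfig{
            SystemType: "Container",
            Name:       "example",
        }

        c, err := hcsshim.CreateContainer("example-id", cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        if err := c.Start(); err != nil {
            log.Fatal(err)
        }

        // Request shutdown, then wait for the container to actually exit.
        if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
            log.Fatal(err)
        }
        if err := c.Wait(); err != nil {
            log.Fatal(err)
        }
    }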
-func (container *container) Close() error { - return convertSystemError(container.system.Close(), container) -} - -// Modify the System -func (container *container) Modify(config *ResourceModificationRequestResponse) error { - return convertSystemError(container.system.Modify(context.Background(), config), container) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/errors.go b/test/vendor/github.com/Microsoft/hcsshim/errors.go deleted file mode 100644 index 594bbfb7a8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/errors.go +++ /dev/null @@ -1,250 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hns" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists = hcs.exist - ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = hcs.ErrElementNotFound - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = hcs.ErrNotSupported - - // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = hcs.ErrInvalidData - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = hcs.ErrHandleClose - - // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = hcs.ErrAlreadyClosed - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = hcs.ErrInvalidNotificationType - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = hcs.ErrInvalidProcessState - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = hcs.ErrTimeout - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort - - // ErrUnexpectedValue is an error encountered when hcs returns an invalid value - ErrUnexpectedValue = hcs.ErrUnexpectedValue - - // ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied - ErrOperationDenied = hcs.ErrOperationDenied - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState - - // ErrProcNotFound is an error encountered when a 
procedure look up fails. - ErrProcNotFound = hcs.ErrProcNotFound - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. - ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = hcs.ErrPlatformNotSupported -) - -type EndpointNotFoundError = hns.EndpointNotFoundError -type NetworkNotFoundError = hns.NetworkNotFoundError - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - Process *process - Operation string - Err error - Events []hcs.ErrorEvent -} - -// ContainerError is an error encountered in HCS during an operation on a Container object -type ContainerError struct { - Container *container - Operation string - Err error - Events []hcs.ErrorEvent -} - -func (e *ContainerError) Error() string { - if e == nil { - return "" - } - - if e.Container == nil { - return "unexpected nil container for error: " + e.Err.Error() - } - - s := "container " + e.Container.system.ID() - - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": %s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -func (e *ProcessError) Error() string { - if e == nil { - return "" - } - - if e.Process == nil { - return "Unexpected nil process for error: " + e.Err.Error() - } - - s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID()) - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": %s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { - return true - } - if _, ok := err.(NetworkNotFoundError); ok { - return true - } - return hcs.IsNotExist(getInnerError(err)) -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. 
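These predicates are mostly used for tolerant teardown in callers and tests. A sketch of the usual pattern; the helper name and package are invented:

    //go:build windows

    package example

    import "github.com/Microsoft/hcsshim"

    // cleanupContainer terminates a container but treats "already stopped"
    // and "no longer exists" as success, which is the typical use of the
    // predicates in this file.
    func cleanupContainer(c hcsshim.Container) error {
        err := c.Terminate()
        if err == nil || hcsshim.IsPending(err) {
            err = c.Wait()
        }
        if err == nil || hcsshim.IsAlreadyStopped(err) || hcsshim.IsNotExist(err) {
            return nil
        }
        return err
    }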
-func IsAlreadyClosed(err error) bool { - return hcs.IsAlreadyClosed(getInnerError(err)) -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. -func IsPending(err error) bool { - return hcs.IsPending(getInnerError(err)) -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. -func IsTimeout(err error) bool { - return hcs.IsTimeout(getInnerError(err)) -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsAlreadyStopped(err error) bool { - return hcs.IsAlreadyStopped(getInnerError(err)) -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - return hcs.IsNotSupported(getInnerError(err)) -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - return hcs.IsOperationInvalidState(getInnerError(err)) -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. -func IsAccessIsDenied(err error) bool { - return hcs.IsAccessIsDenied(getInnerError(err)) -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *ContainerError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} - -func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { - return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} - } - return err -} - -func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { - return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go deleted file mode 100644 index f45307b961..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go +++ /dev/null @@ -1,248 +0,0 @@ -package dmverity - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/binary" - "fmt" - "io" - "os" - - "github.com/pkg/errors" - - "github.com/Microsoft/hcsshim/ext4/internal/compactext4" - "github.com/Microsoft/hcsshim/internal/memory" -) - -const ( - blockSize = compactext4.BlockSize - // MerkleTreeBufioSize is a default buffer size to use with bufio.Reader - MerkleTreeBufioSize = memory.MiB // 1MB - // RecommendedVHDSizeGB is the recommended size in GB for VHDs, which is not a hard limit. - RecommendedVHDSizeGB = 128 * memory.GiB - // VeritySignature is a value written to dm-verity super-block. 
- VeritySignature = "verity" -) - -var ( - salt = bytes.Repeat([]byte{0}, 32) - sbSize = binary.Size(dmveritySuperblock{}) -) - -var ( - ErrSuperBlockReadFailure = errors.New("failed to read dm-verity super block") - ErrSuperBlockParseFailure = errors.New("failed to parse dm-verity super block") - ErrRootHashReadFailure = errors.New("failed to read dm-verity root hash") - ErrNotVeritySuperBlock = errors.New("invalid dm-verity super-block signature") -) - -type dmveritySuperblock struct { - /* (0) "verity\0\0" */ - Signature [8]byte - /* (8) superblock version, 1 */ - Version uint32 - /* (12) 0 - Chrome OS, 1 - normal */ - HashType uint32 - /* (16) UUID of hash device */ - UUID [16]byte - /* (32) Name of the hash algorithm (e.g., sha256) */ - Algorithm [32]byte - /* (64) The data block size in bytes */ - DataBlockSize uint32 - /* (68) The hash block size in bytes */ - HashBlockSize uint32 - /* (72) The number of data blocks */ - DataBlocks uint64 - /* (80) Size of the salt */ - SaltSize uint16 - /* (82) Padding */ - _ [6]byte - /* (88) The salt */ - Salt [256]byte - /* (344) Padding */ - _ [168]byte -} - -// VerityInfo is minimal exported version of dmveritySuperblock -type VerityInfo struct { - // Offset in blocks on hash device - HashOffsetInBlocks int64 - // Set to true, when dm-verity super block is also written on the hash device - SuperBlock bool - RootDigest string - Salt string - Algorithm string - DataBlockSize uint32 - HashBlockSize uint32 - DataBlocks uint64 - Version uint32 -} - -// MerkleTree constructs dm-verity hash-tree for a given io.Reader with a fixed salt (0-byte) and algorithm (sha256). -func MerkleTree(r io.Reader) ([]byte, error) { - layers := make([][]byte, 0) - currentLevel := r - - for { - nextLevel := bytes.NewBuffer(make([]byte, 0)) - for { - block := make([]byte, blockSize) - if _, err := io.ReadFull(currentLevel, block); err != nil { - if err == io.EOF { - break - } - return nil, errors.Wrap(err, "failed to read data block") - } - h := hash2(salt, block) - nextLevel.Write(h) - } - - padding := bytes.Repeat([]byte{0}, blockSize-(nextLevel.Len()%blockSize)) - nextLevel.Write(padding) - - layers = append(layers, nextLevel.Bytes()) - currentLevel = bufio.NewReaderSize(nextLevel, MerkleTreeBufioSize) - - // This means that only root hash remains and our job is done - if nextLevel.Len() == blockSize { - break - } - } - - tree := bytes.NewBuffer(make([]byte, 0)) - for i := len(layers) - 1; i >= 0; i-- { - if _, err := tree.Write(layers[i]); err != nil { - return nil, errors.Wrap(err, "failed to write merkle tree") - } - } - - return tree.Bytes(), nil -} - -// RootHash computes root hash of dm-verity hash-tree -func RootHash(tree []byte) []byte { - return hash2(salt, tree[:blockSize]) -} - -// NewDMVeritySuperblock returns a dm-verity superblock for a device with a given size, salt, algorithm and versions are -// fixed. 
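MerkleTree and RootHash can be used on their own when only a digest is needed. A small sketch over an in-memory buffer; the input length must be a whole number of 4 KiB blocks, since the read loop above only accepts full blocks:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/Microsoft/hcsshim/ext4/dmverity"
    )

    func main() {
        // 8 MiB of zeros, i.e. 2048 data blocks of 4096 bytes each.
        data := bytes.Repeat([]byte{0}, 8*1024*1024)

        tree, err := dmverity.MerkleTree(bytes.NewReader(data))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("root hash: %x\n", dmverity.RootHash(tree))
    }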
-func NewDMVeritySuperblock(size uint64) *dmveritySuperblock { - superblock := &dmveritySuperblock{ - Version: 1, - HashType: 1, - UUID: generateUUID(), - DataBlockSize: blockSize, - HashBlockSize: blockSize, - DataBlocks: size / blockSize, - SaltSize: uint16(len(salt)), - } - - copy(superblock.Signature[:], VeritySignature) - copy(superblock.Algorithm[:], "sha256") - copy(superblock.Salt[:], salt) - - return superblock -} - -func hash2(a, b []byte) []byte { - h := sha256.New() - h.Write(append(a, b...)) - return h.Sum(nil) -} - -func generateUUID() [16]byte { - res := [16]byte{} - if _, err := rand.Read(res[:]); err != nil { - panic(err) - } - return res -} - -// ReadDMVerityInfo extracts dm-verity super block information and merkle tree root hash -func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error) { - vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer vhd.Close() - - // Skip the ext4 data to get to dm-verity super block - if s, err := vhd.Seek(offsetInBytes, io.SeekStart); err != nil || s != offsetInBytes { - if err != nil { - return nil, errors.Wrap(err, "failed to seek dm-verity super block") - } - return nil, errors.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s) - } - - block := make([]byte, blockSize) - if s, err := vhd.Read(block); err != nil || s != blockSize { - if err != nil { - return nil, errors.Wrapf(err, "%s", ErrSuperBlockReadFailure) - } - return nil, errors.Wrapf(ErrSuperBlockReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s) - } - - dmvSB := &dmveritySuperblock{} - b := bytes.NewBuffer(block) - if err := binary.Read(b, binary.LittleEndian, dmvSB); err != nil { - return nil, errors.Wrapf(err, "%s", ErrSuperBlockParseFailure) - } - if string(bytes.Trim(dmvSB.Signature[:], "\x00")[:]) != VeritySignature { - return nil, ErrNotVeritySuperBlock - } - // read the merkle tree root - if s, err := vhd.Read(block); err != nil || s != blockSize { - if err != nil { - return nil, errors.Wrapf(err, "%s", ErrRootHashReadFailure) - } - return nil, errors.Wrapf(ErrRootHashReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s) - } - rootHash := hash2(dmvSB.Salt[:dmvSB.SaltSize], block) - return &VerityInfo{ - RootDigest: fmt.Sprintf("%x", rootHash), - Algorithm: string(bytes.Trim(dmvSB.Algorithm[:], "\x00")), - Salt: fmt.Sprintf("%x", dmvSB.Salt[:dmvSB.SaltSize]), - HashOffsetInBlocks: int64(dmvSB.DataBlocks), - SuperBlock: true, - DataBlocks: dmvSB.DataBlocks, - DataBlockSize: dmvSB.DataBlockSize, - HashBlockSize: blockSize, - Version: dmvSB.Version, - }, nil -} - -// ComputeAndWriteHashDevice builds merkle tree from a given io.ReadSeeker and writes the result -// hash device (dm-verity super-block combined with merkle tree) to io.WriteSeeker. 
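ComputeAndWriteHashDevice, whose body follows, can take the same file as both reader and writer: it hashes the current contents and appends the super-block and tree at the end, and ReadDMVerityInfo is then given the original ext4 size as the offset. A sketch with an invented file name:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/Microsoft/hcsshim/ext4/dmverity"
    )

    func main() {
        f, err := os.OpenFile("layer.ext4", os.O_RDWR, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Remember where the ext4 data ends; the hash device is appended there.
        fi, err := f.Stat()
        if err != nil {
            log.Fatal(err)
        }
        ext4Size := fi.Size()

        if err := dmverity.ComputeAndWriteHashDevice(f, f); err != nil {
            log.Fatal(err)
        }

        info, err := dmverity.ReadDMVerityInfo("layer.ext4", ext4Size)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("root digest:", info.RootDigest)
    }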
-func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.WriteSeeker) error { - if _, err := r.Seek(0, io.SeekStart); err != nil { - return err - } - tree, err := MerkleTree(r) - if err != nil { - return errors.Wrap(err, "failed to build merkle tree") - } - - devSize, err := r.Seek(0, io.SeekEnd) - if err != nil { - return err - } - dmVeritySB := NewDMVeritySuperblock(uint64(devSize)) - if _, err := w.Seek(0, io.SeekEnd); err != nil { - return err - } - if err := binary.Write(w, binary.LittleEndian, dmVeritySB); err != nil { - return errors.Wrap(err, "failed to write dm-verity super-block") - } - // write super-block padding - padding := bytes.Repeat([]byte{0}, blockSize-(sbSize%blockSize)) - if _, err = w.Write(padding); err != nil { - return err - } - // write tree - if _, err := w.Write(tree); err != nil { - return errors.Wrap(err, "failed to write merkle tree") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go deleted file mode 100644 index 504437270b..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go +++ /dev/null @@ -1,1348 +0,0 @@ -package compactext4 - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "path" - "sort" - "strings" - "time" - - "github.com/Microsoft/hcsshim/ext4/internal/format" - "github.com/Microsoft/hcsshim/internal/memory" -) - -// Writer writes a compact ext4 file system. -type Writer struct { - f io.ReadWriteSeeker - bw *bufio.Writer - inodes []*inode - curName string - curInode *inode - pos int64 - dataWritten, dataMax int64 - err error - initialized bool - supportInlineData bool - maxDiskSize int64 - gdBlocks uint32 -} - -// Mode flags for Linux files. -const ( - S_IXOTH = format.S_IXOTH - S_IWOTH = format.S_IWOTH - S_IROTH = format.S_IROTH - S_IXGRP = format.S_IXGRP - S_IWGRP = format.S_IWGRP - S_IRGRP = format.S_IRGRP - S_IXUSR = format.S_IXUSR - S_IWUSR = format.S_IWUSR - S_IRUSR = format.S_IRUSR - S_ISVTX = format.S_ISVTX - S_ISGID = format.S_ISGID - S_ISUID = format.S_ISUID - S_IFIFO = format.S_IFIFO - S_IFCHR = format.S_IFCHR - S_IFDIR = format.S_IFDIR - S_IFBLK = format.S_IFBLK - S_IFREG = format.S_IFREG - S_IFLNK = format.S_IFLNK - S_IFSOCK = format.S_IFSOCK - - TypeMask = format.TypeMask -) - -type inode struct { - Size int64 - Atime, Ctime, Mtime, Crtime uint64 - Number format.InodeNumber - Mode uint16 - Uid, Gid uint32 - LinkCount uint32 - XattrBlock uint32 - BlockCount uint32 - Devmajor, Devminor uint32 - Flags format.InodeFlag - Data []byte - XattrInline []byte - Children directory -} - -func (node *inode) FileType() uint16 { - return node.Mode & format.TypeMask -} - -func (node *inode) IsDir() bool { - return node.FileType() == S_IFDIR -} - -// A File represents a file to be added to an ext4 file system. 
-type File struct { - Linkname string - Size int64 - Mode uint16 - Uid, Gid uint32 - Atime, Ctime, Mtime, Crtime time.Time - Devmajor, Devminor uint32 - Xattrs map[string][]byte -} - -const ( - inodeFirst = 11 - inodeLostAndFound = inodeFirst - - BlockSize = 4096 - blocksPerGroup = BlockSize * 8 - inodeSize = 256 - maxInodesPerGroup = BlockSize * 8 // Limited by the inode bitmap - inodesPerGroupIncrement = BlockSize / inodeSize - - defaultMaxDiskSize = 16 * memory.GiB // 16GB - maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB - - groupDescriptorSize = 32 // Use the small group descriptor - groupsPerDescriptorBlock = BlockSize / groupDescriptorSize - - maxFileSize = 128 * memory.GiB // 128GB file size maximum for now - smallSymlinkSize = 59 // max symlink size that goes directly in the inode - maxBlocksPerExtent = 0x8000 // maximum number of blocks in an extent - inodeDataSize = 60 - inodeUsedSize = 152 // fields through CrtimeExtra - inodeExtraSize = inodeSize - inodeUsedSize - xattrInodeOverhead = 4 + 4 // magic number + empty next entry value - xattrBlockOverhead = 32 + 4 // header + empty next entry value - inlineDataXattrOverhead = xattrInodeOverhead + 16 + 4 // entry + "data" - inlineDataSize = inodeDataSize + inodeExtraSize - inlineDataXattrOverhead -) - -type exceededMaxSizeError struct { - Size int64 -} - -func (err exceededMaxSizeError) Error() string { - return fmt.Sprintf("disk exceeded maximum size of %d bytes", err.Size) -} - -var directoryEntrySize = binary.Size(format.DirectoryEntry{}) -var extraIsize = uint16(inodeUsedSize - 128) - -type directory map[string]*inode - -func splitFirst(p string) (string, string) { - n := strings.IndexByte(p, '/') - if n >= 0 { - return p[:n], p[n+1:] - } - return p, "" -} - -func (w *Writer) findPath(root *inode, p string) *inode { - inode := root - for inode != nil && len(p) != 0 { - name, rest := splitFirst(p) - p = rest - inode = inode.Children[name] - } - return inode -} - -func timeToFsTime(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - s := t.Unix() - if s < -0x80000000 { - return 0x80000000 - } - if s > 0x37fffffff { - return 0x37fffffff - } - return uint64(s) | uint64(t.Nanosecond())<<34 -} - -func fsTimeToTime(t uint64) time.Time { - if t == 0 { - return time.Time{} - } - s := int64(t & 0x3ffffffff) - if s > 0x7fffffff && s < 0x100000000 { - s = int64(int32(uint32(s))) - } - return time.Unix(s, int64(t>>34)) -} - -func (w *Writer) getInode(i format.InodeNumber) *inode { - if i == 0 || int(i) > len(w.inodes) { - return nil - } - return w.inodes[i-1] -} - -var xattrPrefixes = []struct { - Index uint8 - Prefix string -}{ - {2, "system.posix_acl_access"}, - {3, "system.posix_acl_default"}, - {8, "system.richacl"}, - {7, "system."}, - {1, "user."}, - {4, "trusted."}, - {6, "security."}, -} - -func compressXattrName(name string) (uint8, string) { - for _, p := range xattrPrefixes { - if strings.HasPrefix(name, p.Prefix) { - return p.Index, name[len(p.Prefix):] - } - } - return 0, name -} - -func decompressXattrName(index uint8, name string) string { - for _, p := range xattrPrefixes { - if index == p.Index { - return p.Prefix + name - } - } - return name -} - -func hashXattrEntry(name string, value []byte) uint32 { - var hash uint32 - for i := 0; i < len(name); i++ { - hash = (hash << 5) ^ (hash >> 27) ^ uint32(name[i]) - } - - for i := 0; i+3 < len(value); i += 4 { - hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(value[i:i+4]) - } - - if len(value)%4 != 0 { - var last [4]byte - copy(last[:], 
value[len(value)&^3:]) - hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(last[:]) - } - return hash -} - -type xattr struct { - Name string - Index uint8 - Value []byte -} - -func (x *xattr) EntryLen() int { - return (len(x.Name)+3)&^3 + 16 -} - -func (x *xattr) ValueLen() int { - return (len(x.Value) + 3) &^ 3 -} - -type xattrState struct { - inode, block []xattr - inodeLeft, blockLeft int -} - -func (s *xattrState) init() { - s.inodeLeft = inodeExtraSize - xattrInodeOverhead - s.blockLeft = BlockSize - xattrBlockOverhead -} - -func (s *xattrState) addXattr(name string, value []byte) bool { - index, name := compressXattrName(name) - x := xattr{ - Index: index, - Name: name, - Value: value, - } - length := x.EntryLen() + x.ValueLen() - if s.inodeLeft >= length { - s.inode = append(s.inode, x) - s.inodeLeft -= length - } else if s.blockLeft >= length { - s.block = append(s.block, x) - s.blockLeft -= length - } else { - return false - } - return true -} - -func putXattrs(xattrs []xattr, b []byte, offsetDelta uint16) { - offset := uint16(len(b)) + offsetDelta - eb := b - db := b - for _, xattr := range xattrs { - vl := xattr.ValueLen() - offset -= uint16(vl) - eb[0] = uint8(len(xattr.Name)) - eb[1] = xattr.Index - binary.LittleEndian.PutUint16(eb[2:], offset) - binary.LittleEndian.PutUint32(eb[8:], uint32(len(xattr.Value))) - binary.LittleEndian.PutUint32(eb[12:], hashXattrEntry(xattr.Name, xattr.Value)) - copy(eb[16:], xattr.Name) - eb = eb[xattr.EntryLen():] - copy(db[len(db)-vl:], xattr.Value) - db = db[:len(db)-vl] - } -} - -func getXattrs(b []byte, xattrs map[string][]byte, offsetDelta uint16) { - eb := b - for len(eb) != 0 { - nameLen := eb[0] - if nameLen == 0 { - break - } - index := eb[1] - offset := binary.LittleEndian.Uint16(eb[2:]) - offsetDelta - valueLen := binary.LittleEndian.Uint32(eb[8:]) - attr := xattr{ - Index: index, - Name: string(eb[16 : 16+nameLen]), - Value: b[offset : uint32(offset)+valueLen], - } - xattrs[decompressXattrName(index, attr.Name)] = attr.Value - eb = eb[attr.EntryLen():] - } -} - -func (w *Writer) writeXattrs(inode *inode, state *xattrState) error { - // Write the inline attributes. - if len(state.inode) != 0 { - inode.XattrInline = make([]byte, inodeExtraSize) - binary.LittleEndian.PutUint32(inode.XattrInline[0:], format.XAttrHeaderMagic) // Magic - putXattrs(state.inode, inode.XattrInline[4:], 0) - } - - // Write the block attributes. If there was previously an xattr block, then - // rewrite it even if it is now empty. - if len(state.block) != 0 || inode.XattrBlock != 0 { - sort.Slice(state.block, func(i, j int) bool { - return state.block[i].Index < state.block[j].Index || - len(state.block[i].Name) < len(state.block[j].Name) || - state.block[i].Name < state.block[j].Name - }) - - var b [BlockSize]byte - binary.LittleEndian.PutUint32(b[0:], format.XAttrHeaderMagic) // Magic - binary.LittleEndian.PutUint32(b[4:], 1) // ReferenceCount - binary.LittleEndian.PutUint32(b[8:], 1) // Blocks - putXattrs(state.block, b[32:], 32) - - orig := w.block() - if inode.XattrBlock == 0 { - inode.XattrBlock = orig - inode.BlockCount++ - } else { - // Reuse the original block. 
- w.seekBlock(inode.XattrBlock) - defer w.seekBlock(orig) - } - - if _, err := w.write(b[:]); err != nil { - return err - } - } - - return nil -} - -func (w *Writer) write(b []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - if w.pos+int64(len(b)) > w.maxDiskSize { - w.err = exceededMaxSizeError{w.maxDiskSize} - return 0, w.err - } - n, err := w.bw.Write(b) - w.pos += int64(n) - w.err = err - return n, err -} - -func (w *Writer) zero(n int64) (int64, error) { - if w.err != nil { - return 0, w.err - } - if w.pos+int64(n) > w.maxDiskSize { - w.err = exceededMaxSizeError{w.maxDiskSize} - return 0, w.err - } - n, err := io.CopyN(w.bw, zero, n) - w.pos += n - w.err = err - return n, err -} - -func (w *Writer) makeInode(f *File, node *inode) (*inode, error) { - mode := f.Mode - if mode&format.TypeMask == 0 { - mode |= format.S_IFREG - } - typ := mode & format.TypeMask - ino := format.InodeNumber(len(w.inodes) + 1) - if node == nil { - node = &inode{ - Number: ino, - } - if typ == S_IFDIR { - node.Children = make(directory) - node.LinkCount = 1 // A directory is linked to itself. - } - } else if node.Flags&format.InodeFlagExtents != 0 { - // Since we cannot deallocate or reuse blocks, don't allow updates that - // would invalidate data that has already been written. - return nil, errors.New("cannot overwrite file with non-inline data") - } - node.Mode = mode - node.Uid = f.Uid - node.Gid = f.Gid - node.Flags = format.InodeFlagHugeFile - node.Atime = timeToFsTime(f.Atime) - node.Ctime = timeToFsTime(f.Ctime) - node.Mtime = timeToFsTime(f.Mtime) - node.Crtime = timeToFsTime(f.Crtime) - node.Devmajor = f.Devmajor - node.Devminor = f.Devminor - node.Data = nil - if f.Xattrs == nil { - f.Xattrs = make(map[string][]byte) - } - - // copy over existing xattrs first, we need to merge existing xattrs and the passed xattrs. - existingXattrs := make(map[string][]byte) - if len(node.XattrInline) > 0 { - getXattrs(node.XattrInline[4:], existingXattrs, 0) - } - node.XattrInline = nil - - var xstate xattrState - xstate.init() - - var size int64 - switch typ { - case format.S_IFREG: - size = f.Size - if f.Size > maxFileSize { - return nil, fmt.Errorf("file too big: %d > %d", f.Size, int64(maxFileSize)) - } - if f.Size <= inlineDataSize && w.supportInlineData { - node.Data = make([]byte, f.Size) - extra := 0 - if f.Size > inodeDataSize { - extra = int(f.Size - inodeDataSize) - } - // Add a dummy entry for now. - if !xstate.addXattr("system.data", node.Data[:extra]) { - panic("not enough room for inline data") - } - node.Flags |= format.InodeFlagInlineData - } - case format.S_IFLNK: - node.Mode |= 0777 // Symlinks should appear as ugw rwx - size = int64(len(f.Linkname)) - if size <= smallSymlinkSize { - // Special case: small symlinks go directly in Block without setting - // an inline data flag. - node.Data = make([]byte, len(f.Linkname)) - copy(node.Data, f.Linkname) - } - case format.S_IFDIR, format.S_IFIFO, format.S_IFSOCK, format.S_IFCHR, format.S_IFBLK: - default: - return nil, fmt.Errorf("invalid mode %o", mode) - } - - // merge xattrs but prefer currently passed over existing - for name, data := range existingXattrs { - if _, ok := f.Xattrs[name]; !ok { - f.Xattrs[name] = data - } - } - - // Accumulate the extended attributes. - if len(f.Xattrs) != 0 { - // Sort the xattrs to avoid non-determinism in map iteration. 
- var xattrs []string - for name := range f.Xattrs { - xattrs = append(xattrs, name) - } - sort.Strings(xattrs) - for _, name := range xattrs { - if !xstate.addXattr(name, f.Xattrs[name]) { - return nil, fmt.Errorf("could not fit xattr %s", name) - } - } - } - - if err := w.writeXattrs(node, &xstate); err != nil { - return nil, err - } - - node.Size = size - if typ == format.S_IFLNK && size > smallSymlinkSize { - // Write the link name as data. - w.startInode("", node, size) - if _, err := w.Write([]byte(f.Linkname)); err != nil { - return nil, err - } - if err := w.finishInode(); err != nil { - return nil, err - } - } - - if int(node.Number-1) >= len(w.inodes) { - w.inodes = append(w.inodes, node) - } - return node, nil -} - -func (w *Writer) root() *inode { - return w.getInode(format.InodeRoot) -} - -func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, error) { - root := w.root() - cleanname := path.Clean("/" + name)[1:] - if len(cleanname) == 0 { - return root, root, "", nil - } - dirname, childname := path.Split(cleanname) - if len(childname) == 0 || len(childname) > 0xff { - return nil, nil, "", fmt.Errorf("%s: invalid name", name) - } - dir := w.findPath(root, dirname) - if dir == nil || !dir.IsDir() { - return nil, nil, "", fmt.Errorf("%s: path not found", name) - } - child := dir.Children[childname] - if child == nil && mustExist { - return nil, nil, "", fmt.Errorf("%s: file not found", name) - } - return dir, child, childname, nil -} - -// MakeParents ensures that all the parent directories in the path specified by `name` exists. If -// they don't exist it creates them (like `mkdir -p`). These non existing parent directories are created -// with the same permissions as that of it's parent directory. It is expected that the a -// call to make these parent directories will be made at a later point with the correct -// permissions, at that time the permissions of these directories will be updated. -func (w *Writer) MakeParents(name string) error { - if err := w.finishInode(); err != nil { - return err - } - - // go through the directories in the path one by one and create the - // parent directories if they don't exist. - cleanname := path.Clean("/" + name)[1:] - parentDirs, _ := path.Split(cleanname) - currentPath := "" - root := w.root() - dirname := "" - for parentDirs != "" { - dirname, parentDirs = splitFirst(parentDirs) - currentPath += "/" + dirname - if _, ok := root.Children[dirname]; !ok { - f := &File{ - Mode: root.Mode, - Atime: time.Now(), - Mtime: time.Now(), - Ctime: time.Now(), - Crtime: time.Now(), - Size: 0, - Uid: root.Uid, - Gid: root.Gid, - Devmajor: root.Devmajor, - Devminor: root.Devminor, - Xattrs: make(map[string][]byte), - } - if err := w.Create(currentPath, f); err != nil { - return fmt.Errorf("failed while creating parent directories: %w", err) - } - } - root = root.Children[dirname] - } - return nil -} - -// Create adds a file to the file system. 
-func (w *Writer) Create(name string, f *File) error { - if err := w.finishInode(); err != nil { - return err - } - dir, existing, childname, err := w.lookup(name, false) - if err != nil { - return err - } - var reuse *inode - if existing != nil { - if existing.IsDir() { - if f.Mode&TypeMask != S_IFDIR { - return fmt.Errorf("%s: cannot replace a directory with a file", name) - } - reuse = existing - } else if f.Mode&TypeMask == S_IFDIR { - return fmt.Errorf("%s: cannot replace a file with a directory", name) - } else if existing.LinkCount < 2 { - reuse = existing - } - } else { - if f.Mode&TypeMask == S_IFDIR && dir.LinkCount >= format.MaxLinks { - return fmt.Errorf("%s: exceeded parent directory maximum link count", name) - } - } - child, err := w.makeInode(f, reuse) - if err != nil { - return fmt.Errorf("%s: %s", name, err) - } - if existing != child { - if existing != nil { - existing.LinkCount-- - } - dir.Children[childname] = child - child.LinkCount++ - if child.IsDir() { - dir.LinkCount++ - } - } - if child.Mode&format.TypeMask == format.S_IFREG { - w.startInode(name, child, f.Size) - } - return nil -} - -// Link adds a hard link to the file system. -// We support creating hardlinks to symlinks themselves instead of what -// the symlinks link to, as this is what containerd does upstream. -func (w *Writer) Link(oldname, newname string) error { - if err := w.finishInode(); err != nil { - return err - } - newdir, existing, newchildname, err := w.lookup(newname, false) - if err != nil { - return err - } - if existing != nil && (existing.IsDir() || existing.LinkCount < 2) { - return fmt.Errorf("%s: cannot orphan existing file or directory", newname) - } - - _, oldfile, _, err := w.lookup(oldname, true) - if err != nil { - return err - } - switch oldfile.Mode & format.TypeMask { - case format.S_IFDIR: - return fmt.Errorf("%s: link target cannot be a directory: %s", newname, oldname) - } - - if existing != oldfile && oldfile.LinkCount >= format.MaxLinks { - return fmt.Errorf("%s: link target would exceed maximum link count: %s", newname, oldname) - } - - if existing != nil { - existing.LinkCount-- - } - oldfile.LinkCount++ - newdir.Children[newchildname] = oldfile - return nil -} - -// Stat returns information about a file that has been written. 
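Pulling the Writer API together (Create, Write, Link, plus the NewWriter constructor and Close that appear further down in this file), a sketch of how a consumer drives it. compactext4 is an internal package, so real callers live inside the hcsshim module (tar2ext4 is the actual consumer); the helper below and its file contents are illustrative only:

    package example

    import (
        "io"

        "github.com/Microsoft/hcsshim/ext4/internal/compactext4"
    )

    // writeImage writes a tiny ext4 image containing one directory, one file
    // and one hard link.
    func writeImage(f io.ReadWriteSeeker) error {
        w := compactext4.NewWriter(f, compactext4.InlineData)

        // Directories first; MakeParents could be used instead to create
        // intermediate path components.
        if err := w.Create("etc", &compactext4.File{Mode: compactext4.S_IFDIR | 0755}); err != nil {
            return err
        }

        // For a regular file, Size is a promise: exactly that many bytes
        // must be written before the next Create/Link/Close.
        data := []byte("hello\n")
        if err := w.Create("etc/hello", &compactext4.File{
            Mode: compactext4.S_IFREG | 0644,
            Size: int64(len(data)),
        }); err != nil {
            return err
        }
        if _, err := w.Write(data); err != nil {
            return err
        }

        // Hard link to the file just written.
        if err := w.Link("etc/hello", "etc/hello-link"); err != nil {
            return err
        }

        return w.Close()
    }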
-func (w *Writer) Stat(name string) (*File, error) { - if err := w.finishInode(); err != nil { - return nil, err - } - _, node, _, err := w.lookup(name, true) - if err != nil { - return nil, err - } - f := &File{ - Size: node.Size, - Mode: node.Mode, - Uid: node.Uid, - Gid: node.Gid, - Atime: fsTimeToTime(node.Atime), - Ctime: fsTimeToTime(node.Ctime), - Mtime: fsTimeToTime(node.Mtime), - Crtime: fsTimeToTime(node.Crtime), - Devmajor: node.Devmajor, - Devminor: node.Devminor, - } - f.Xattrs = make(map[string][]byte) - if node.XattrBlock != 0 || len(node.XattrInline) != 0 { - if node.XattrBlock != 0 { - orig := w.block() - w.seekBlock(node.XattrBlock) - if w.err != nil { - return nil, w.err - } - var b [BlockSize]byte - _, err := w.f.Read(b[:]) - w.seekBlock(orig) - if err != nil { - return nil, err - } - getXattrs(b[32:], f.Xattrs, 32) - } - if len(node.XattrInline) != 0 { - getXattrs(node.XattrInline[4:], f.Xattrs, 0) - delete(f.Xattrs, "system.data") - } - } - if node.FileType() == S_IFLNK { - if node.Size > smallSymlinkSize { - return nil, fmt.Errorf("%s: cannot retrieve link information", name) - } - f.Linkname = string(node.Data) - } - return f, nil -} - -func (w *Writer) Write(b []byte) (int, error) { - if len(b) == 0 { - return 0, nil - } - if w.dataWritten+int64(len(b)) > w.dataMax { - return 0, fmt.Errorf("%s: wrote too much: %d > %d", w.curName, w.dataWritten+int64(len(b)), w.dataMax) - } - - if w.curInode.Flags&format.InodeFlagInlineData != 0 { - copy(w.curInode.Data[w.dataWritten:], b) - w.dataWritten += int64(len(b)) - return len(b), nil - } - - n, err := w.write(b) - w.dataWritten += int64(n) - return n, err -} - -func (w *Writer) startInode(name string, inode *inode, size int64) { - if w.curInode != nil { - panic("inode already in progress") - } - w.curName = name - w.curInode = inode - w.dataWritten = 0 - w.dataMax = size -} - -func (w *Writer) block() uint32 { - return uint32(w.pos / BlockSize) -} - -func (w *Writer) seekBlock(block uint32) { - w.pos = int64(block) * BlockSize - if w.err != nil { - return - } - w.err = w.bw.Flush() - if w.err != nil { - return - } - _, w.err = w.f.Seek(w.pos, io.SeekStart) -} - -func (w *Writer) nextBlock() { - if w.pos%BlockSize != 0 { - // Simplify callers; w.err is updated on failure. - _, _ = w.zero(BlockSize - w.pos%BlockSize) - } -} - -func fillExtents(hdr *format.ExtentHeader, extents []format.ExtentLeafNode, startBlock, offset, inodeSize uint32) { - *hdr = format.ExtentHeader{ - Magic: format.ExtentHeaderMagic, - Entries: uint16(len(extents)), - Max: uint16(cap(extents)), - Depth: 0, - } - for i := range extents { - block := offset + uint32(i)*maxBlocksPerExtent - length := inodeSize - block - if length > maxBlocksPerExtent { - length = maxBlocksPerExtent - } - start := startBlock + block - extents[i] = format.ExtentLeafNode{ - Block: block, - Length: uint16(length), - StartLow: start, - } - } -} - -func (w *Writer) writeExtents(inode *inode) error { - start := w.pos - w.dataWritten - if start%BlockSize != 0 { - panic("unaligned") - } - w.nextBlock() - - startBlock := uint32(start / BlockSize) - blocks := w.block() - startBlock - usedBlocks := blocks - - const extentNodeSize = 12 - const extentsPerBlock = BlockSize/extentNodeSize - 1 - - extents := (blocks + maxBlocksPerExtent - 1) / maxBlocksPerExtent - var b bytes.Buffer - if extents == 0 { - // Nothing to do. 
- } else if extents <= 4 { - var root struct { - hdr format.ExtentHeader - extents [4]format.ExtentLeafNode - } - fillExtents(&root.hdr, root.extents[:extents], startBlock, 0, blocks) - _ = binary.Write(&b, binary.LittleEndian, root) - } else if extents <= 4*extentsPerBlock { - const extentsPerBlock = BlockSize/extentNodeSize - 1 - extentBlocks := extents/extentsPerBlock + 1 - usedBlocks += extentBlocks - var b2 bytes.Buffer - - var root struct { - hdr format.ExtentHeader - nodes [4]format.ExtentIndexNode - } - root.hdr = format.ExtentHeader{ - Magic: format.ExtentHeaderMagic, - Entries: uint16(extentBlocks), - Max: 4, - Depth: 1, - } - for i := uint32(0); i < extentBlocks; i++ { - root.nodes[i] = format.ExtentIndexNode{ - Block: i * extentsPerBlock * maxBlocksPerExtent, - LeafLow: w.block(), - } - extentsInBlock := extents - i*extentBlocks - if extentsInBlock > extentsPerBlock { - extentsInBlock = extentsPerBlock - } - - var node struct { - hdr format.ExtentHeader - extents [extentsPerBlock]format.ExtentLeafNode - _ [BlockSize - (extentsPerBlock+1)*extentNodeSize]byte - } - - offset := i * extentsPerBlock * maxBlocksPerExtent - fillExtents(&node.hdr, node.extents[:extentsInBlock], startBlock+offset, offset, blocks) - _ = binary.Write(&b2, binary.LittleEndian, node) - if _, err := w.write(b2.Next(BlockSize)); err != nil { - return err - } - } - _ = binary.Write(&b, binary.LittleEndian, root) - } else { - panic("file too big") - } - - inode.Data = b.Bytes() - inode.Flags |= format.InodeFlagExtents - inode.BlockCount += usedBlocks - return w.err -} - -func (w *Writer) finishInode() error { - if !w.initialized { - if err := w.init(); err != nil { - return err - } - } - if w.curInode == nil { - return nil - } - if w.dataWritten != w.dataMax { - return fmt.Errorf("did not write the right amount: %d != %d", w.dataWritten, w.dataMax) - } - - if w.dataMax != 0 && w.curInode.Flags&format.InodeFlagInlineData == 0 { - if err := w.writeExtents(w.curInode); err != nil { - return err - } - } - - w.dataWritten = 0 - w.dataMax = 0 - w.curInode = nil - return w.err -} - -func modeToFileType(mode uint16) format.FileType { - switch mode & format.TypeMask { - default: - return format.FileTypeUnknown - case format.S_IFREG: - return format.FileTypeRegular - case format.S_IFDIR: - return format.FileTypeDirectory - case format.S_IFCHR: - return format.FileTypeCharacter - case format.S_IFBLK: - return format.FileTypeBlock - case format.S_IFIFO: - return format.FileTypeFIFO - case format.S_IFSOCK: - return format.FileTypeSocket - case format.S_IFLNK: - return format.FileTypeSymbolicLink - } -} - -type constReader byte - -var zero = constReader(0) - -func (r constReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = byte(r) - } - return len(b), nil -} - -func (w *Writer) writeDirectory(dir, parent *inode) error { - if err := w.finishInode(); err != nil { - return err - } - - // The size of the directory is not known yet. 
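To put rough numbers on the extent layout in writeExtents above, using the constants from this file: one leaf extent covers maxBlocksPerExtent * BlockSize = 0x8000 * 4096 bytes = 128 MiB, so a 1 GiB file needs 8 leaf extents. That exceeds the 4 slots that fit inline in the inode, so a single depth-1 layout is used; each index block holds BlockSize/12 - 1 = 340 leaves (about 42.5 GiB), and with up to 4 index entries in the inode the depth-1 case covers roughly 170 GiB, comfortably above the 128 GiB maxFileSize enforced earlier, which is why the final "file too big" branch is effectively unreachable.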
- w.startInode("", dir, 0x7fffffffffffffff) - left := BlockSize - finishBlock := func() error { - if left > 0 { - e := format.DirectoryEntry{ - RecordLength: uint16(left), - } - err := binary.Write(w, binary.LittleEndian, e) - if err != nil { - return err - } - left -= directoryEntrySize - if left < 4 { - panic("not enough space for trailing entry") - } - _, err = io.CopyN(w, zero, int64(left)) - if err != nil { - return err - } - } - left = BlockSize - return nil - } - - writeEntry := func(ino format.InodeNumber, name string) error { - rlb := directoryEntrySize + len(name) - rl := (rlb + 3) & ^3 - if left < rl+12 { - if err := finishBlock(); err != nil { - return err - } - } - e := format.DirectoryEntry{ - Inode: ino, - RecordLength: uint16(rl), - NameLength: uint8(len(name)), - FileType: modeToFileType(w.getInode(ino).Mode), - } - err := binary.Write(w, binary.LittleEndian, e) - if err != nil { - return err - } - _, err = w.Write([]byte(name)) - if err != nil { - return err - } - var zero [4]byte - _, err = w.Write(zero[:rl-rlb]) - if err != nil { - return err - } - left -= rl - return nil - } - if err := writeEntry(dir.Number, "."); err != nil { - return err - } - if err := writeEntry(parent.Number, ".."); err != nil { - return err - } - - // Follow e2fsck's convention and sort the children by inode number. - var children []string - for name := range dir.Children { - children = append(children, name) - } - sort.Slice(children, func(i, j int) bool { - left_num := dir.Children[children[i]].Number - right_num := dir.Children[children[j]].Number - - if left_num == right_num { - return children[i] < children[j] - } - return left_num < right_num - }) - - for _, name := range children { - child := dir.Children[name] - if err := writeEntry(child.Number, name); err != nil { - return err - } - } - if err := finishBlock(); err != nil { - return err - } - w.curInode.Size = w.dataWritten - w.dataMax = w.dataWritten - return nil -} - -func (w *Writer) writeDirectoryRecursive(dir, parent *inode) error { - if err := w.writeDirectory(dir, parent); err != nil { - return err - } - - // Follow e2fsck's convention and sort the children by inode number. 
- var children []string - for name := range dir.Children { - children = append(children, name) - } - sort.Slice(children, func(i, j int) bool { - left_num := dir.Children[children[i]].Number - right_num := dir.Children[children[j]].Number - - if left_num == right_num { - return children[i] < children[j] - } - return left_num < right_num - }) - - for _, name := range children { - child := dir.Children[name] - if child.IsDir() { - if err := w.writeDirectoryRecursive(child, dir); err != nil { - return err - } - } - } - return nil -} - -func (w *Writer) writeInodeTable(tableSize uint32) error { - var b bytes.Buffer - for _, inode := range w.inodes { - if inode != nil { - binode := format.Inode{ - Mode: inode.Mode, - Uid: uint16(inode.Uid & 0xffff), - Gid: uint16(inode.Gid & 0xffff), - SizeLow: uint32(inode.Size & 0xffffffff), - SizeHigh: uint32(inode.Size >> 32), - LinksCount: uint16(inode.LinkCount), - BlocksLow: inode.BlockCount, - Flags: inode.Flags, - XattrBlockLow: inode.XattrBlock, - UidHigh: uint16(inode.Uid >> 16), - GidHigh: uint16(inode.Gid >> 16), - ExtraIsize: uint16(inodeUsedSize - 128), - Atime: uint32(inode.Atime), - AtimeExtra: uint32(inode.Atime >> 32), - Ctime: uint32(inode.Ctime), - CtimeExtra: uint32(inode.Ctime >> 32), - Mtime: uint32(inode.Mtime), - MtimeExtra: uint32(inode.Mtime >> 32), - Crtime: uint32(inode.Crtime), - CrtimeExtra: uint32(inode.Crtime >> 32), - } - switch inode.Mode & format.TypeMask { - case format.S_IFDIR, format.S_IFREG, format.S_IFLNK: - n := copy(binode.Block[:], inode.Data) - if n < len(inode.Data) { - // Rewrite the first xattr with the data. - xattr := [1]xattr{{ - Name: "data", - Index: 7, // "system." - Value: inode.Data[n:], - }} - putXattrs(xattr[:], inode.XattrInline[4:], 0) - } - case format.S_IFBLK, format.S_IFCHR: - dev := inode.Devminor&0xff | inode.Devmajor<<8 | (inode.Devminor&0xffffff00)<<12 - binary.LittleEndian.PutUint32(binode.Block[4:], dev) - } - - _ = binary.Write(&b, binary.LittleEndian, binode) - b.Truncate(inodeUsedSize) - n, _ := b.Write(inode.XattrInline) - _, _ = io.CopyN(&b, zero, int64(inodeExtraSize-n)) - } else { - _, _ = io.CopyN(&b, zero, inodeSize) - } - if _, err := w.write(b.Next(inodeSize)); err != nil { - return err - } - } - rest := tableSize - uint32(len(w.inodes)*inodeSize) - if _, err := w.zero(int64(rest)); err != nil { - return err - } - return nil -} - -// NewWriter returns a Writer that writes an ext4 file system to the provided -// WriteSeeker. -func NewWriter(f io.ReadWriteSeeker, opts ...Option) *Writer { - w := &Writer{ - f: f, - bw: bufio.NewWriterSize(f, 65536*8), - maxDiskSize: defaultMaxDiskSize, - } - for _, opt := range opts { - opt(w) - } - return w -} - -// An Option provides extra options to NewWriter. -type Option func(*Writer) - -// InlineData instructs the Writer to write small files into the inode -// structures directly. This creates smaller images but currently is not -// compatible with DAX. -func InlineData(w *Writer) { - w.supportInlineData = true -} - -// MaximumDiskSize instructs the writer to reserve enough metadata space for the -// specified disk size. If not provided, then 16GB is the default. -func MaximumDiskSize(size int64) Option { - return func(w *Writer) { - if size < 0 || size > maxMaxDiskSize { - w.maxDiskSize = maxMaxDiskSize - } else if size == 0 { - w.maxDiskSize = defaultMaxDiskSize - } else { - w.maxDiskSize = (size + BlockSize - 1) &^ (BlockSize - 1) - } - } -} - -func (w *Writer) init() error { - // Skip the defective block inode. 
- w.inodes = make([]*inode, 1, 32) - // Create the root directory. - root, _ := w.makeInode(&File{ - Mode: format.S_IFDIR | 0755, - }, nil) - root.LinkCount++ // The root is linked to itself. - // Skip until the first non-reserved inode. - w.inodes = append(w.inodes, make([]*inode, inodeFirst-len(w.inodes)-1)...) - maxBlocks := (w.maxDiskSize-1)/BlockSize + 1 - maxGroups := (maxBlocks-1)/blocksPerGroup + 1 - w.gdBlocks = uint32((maxGroups-1)/groupsPerDescriptorBlock + 1) - - // Skip past the superblock and block descriptor table. - w.seekBlock(1 + w.gdBlocks) - w.initialized = true - - // The lost+found directory is required to exist for e2fsck to pass. - if err := w.Create("lost+found", &File{Mode: format.S_IFDIR | 0700}); err != nil { - return err - } - return w.err -} - -func groupCount(blocks uint32, inodes uint32, inodesPerGroup uint32) uint32 { - inodeBlocksPerGroup := inodesPerGroup * inodeSize / BlockSize - dataBlocksPerGroup := blocksPerGroup - inodeBlocksPerGroup - 2 // save room for the bitmaps - - // Increase the block count to ensure there are enough groups for all the - // inodes. - minBlocks := (inodes-1)/inodesPerGroup*dataBlocksPerGroup + 1 - if blocks < minBlocks { - blocks = minBlocks - } - - return (blocks + dataBlocksPerGroup - 1) / dataBlocksPerGroup -} - -func bestGroupCount(blocks uint32, inodes uint32) (groups uint32, inodesPerGroup uint32) { - groups = 0xffffffff - for ipg := uint32(inodesPerGroupIncrement); ipg <= maxInodesPerGroup; ipg += inodesPerGroupIncrement { - g := groupCount(blocks, inodes, ipg) - if g < groups { - groups = g - inodesPerGroup = ipg - } - } - return -} - -func (w *Writer) Close() error { - if err := w.finishInode(); err != nil { - return err - } - root := w.root() - if err := w.writeDirectoryRecursive(root, root); err != nil { - return err - } - // Finish the last inode (probably a directory). - if err := w.finishInode(); err != nil { - return err - } - - // Write the inode table - inodeTableOffset := w.block() - groups, inodesPerGroup := bestGroupCount(inodeTableOffset, uint32(len(w.inodes))) - err := w.writeInodeTable(groups * inodesPerGroup * inodeSize) - if err != nil { - return err - } - - // Write the bitmaps. - bitmapOffset := w.block() - bitmapSize := groups * 2 - validDataSize := bitmapOffset + bitmapSize - diskSize := validDataSize - minSize := (groups-1)*blocksPerGroup + 1 - if diskSize < minSize { - diskSize = minSize - } - - usedGdBlocks := (groups-1)/groupsPerDescriptorBlock + 1 - if usedGdBlocks > w.gdBlocks { - return exceededMaxSizeError{w.maxDiskSize} - } - - gds := make([]format.GroupDescriptor, w.gdBlocks*groupsPerDescriptorBlock) - inodeTableSizePerGroup := inodesPerGroup * inodeSize / BlockSize - var totalUsedBlocks, totalUsedInodes uint32 - for g := uint32(0); g < groups; g++ { - var b [BlockSize * 2]byte - var dirCount, usedInodeCount, usedBlockCount uint16 - - // Block bitmap - if (g+1)*blocksPerGroup <= validDataSize { - // This group is fully allocated. - for j := range b[:BlockSize] { - b[j] = 0xff - } - usedBlockCount = blocksPerGroup - } else if g*blocksPerGroup < validDataSize { - for j := uint32(0); j < validDataSize-g*blocksPerGroup; j++ { - b[j/8] |= 1 << (j % 8) - usedBlockCount++ - } - } - if g == 0 { - // Unused group descriptor blocks should be cleared. - for j := 1 + usedGdBlocks; j < 1+w.gdBlocks; j++ { - b[j/8] &^= 1 << (j % 8) - usedBlockCount-- - } - } - if g == groups-1 && diskSize%blocksPerGroup != 0 { - // Blocks that aren't present in the disk should be marked as - // allocated. 
- for j := diskSize % blocksPerGroup; j < blocksPerGroup; j++ { - b[j/8] |= 1 << (j % 8) - usedBlockCount++ - } - } - // Inode bitmap - for j := uint32(0); j < inodesPerGroup; j++ { - ino := format.InodeNumber(1 + g*inodesPerGroup + j) - inode := w.getInode(ino) - if ino < inodeFirst || inode != nil { - b[BlockSize+j/8] |= 1 << (j % 8) - usedInodeCount++ - } - if inode != nil && inode.Mode&format.TypeMask == format.S_IFDIR { - dirCount++ - } - } - _, err := w.write(b[:]) - if err != nil { - return err - } - gds[g] = format.GroupDescriptor{ - BlockBitmapLow: bitmapOffset + 2*g, - InodeBitmapLow: bitmapOffset + 2*g + 1, - InodeTableLow: inodeTableOffset + g*inodeTableSizePerGroup, - UsedDirsCountLow: dirCount, - FreeInodesCountLow: uint16(inodesPerGroup) - usedInodeCount, - FreeBlocksCountLow: blocksPerGroup - usedBlockCount, - } - - totalUsedBlocks += uint32(usedBlockCount) - totalUsedInodes += uint32(usedInodeCount) - } - - // Zero up to the disk size. - _, err = w.zero(int64(diskSize-bitmapOffset-bitmapSize) * BlockSize) - if err != nil { - return err - } - - // Write the block descriptors - w.seekBlock(1) - if w.err != nil { - return w.err - } - err = binary.Write(w.bw, binary.LittleEndian, gds) - if err != nil { - return err - } - - // Write the super block - var blk [BlockSize]byte - b := bytes.NewBuffer(blk[:1024]) - sb := &format.SuperBlock{ - InodesCount: inodesPerGroup * groups, - BlocksCountLow: diskSize, - FreeBlocksCountLow: blocksPerGroup*groups - totalUsedBlocks, - FreeInodesCount: inodesPerGroup*groups - totalUsedInodes, - FirstDataBlock: 0, - LogBlockSize: 2, // 2^(10 + 2) - LogClusterSize: 2, - BlocksPerGroup: blocksPerGroup, - ClustersPerGroup: blocksPerGroup, - InodesPerGroup: inodesPerGroup, - Magic: format.SuperBlockMagic, - State: 1, // cleanly unmounted - Errors: 1, // continue on error? - CreatorOS: 0, // Linux - RevisionLevel: 1, // dynamic inode sizes - FirstInode: inodeFirst, - LpfInode: inodeLostAndFound, - InodeSize: inodeSize, - FeatureCompat: format.CompatSparseSuper2 | format.CompatExtAttr, - FeatureIncompat: format.IncompatFiletype | format.IncompatExtents | format.IncompatFlexBg, - FeatureRoCompat: format.RoCompatLargeFile | format.RoCompatHugeFile | format.RoCompatExtraIsize | format.RoCompatReadonly, - MinExtraIsize: extraIsize, - WantExtraIsize: extraIsize, - LogGroupsPerFlex: 31, - } - if w.supportInlineData { - sb.FeatureIncompat |= format.IncompatInlineData - } - _ = binary.Write(b, binary.LittleEndian, sb) - w.seekBlock(0) - if _, err := w.write(blk[:]); err != nil { - return err - } - w.seekBlock(diskSize) - return w.err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/doc.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/doc.go deleted file mode 100644 index 83b2fffb02..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). 
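// Illustrative sketch of driving the compactext4 Writer above. The import
// paths are assumptions based on the vendored layout, and because the package
// is internal this would only compile from inside the hcsshim module.
package main

import (
	"os"

	"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
	"github.com/Microsoft/hcsshim/ext4/internal/format"
)

func main() {
	// Backing file that will hold the ext4 image.
	f, err := os.Create("image.ext4")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Reserve metadata for a 1 GiB disk and allow small files to be inlined
	// into inodes (both options are defined in compact.go above).
	w := compactext4.NewWriter(f, compactext4.MaximumDiskSize(1<<30), compactext4.InlineData)

	// Add a directory entry; mode bits come from the format package.
	if err := w.Create("opt", &compactext4.File{Mode: format.S_IFDIR | 0755}); err != nil {
		panic(err)
	}

	// Close writes the directories, inode table, bitmaps and superblock.
	if err := w.Close(); err != nil {
		panic(err)
	}
}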
-package hcn diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go deleted file mode 100644 index 17539b8694..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ /dev/null @@ -1,328 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go - -/// HNS V1 API - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? - -/// HCN V2 API - -// Network -//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks? -//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork? -//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork? -//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork? -//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties? -//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork? -//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork? - -// Endpoint -//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints? -//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint? -//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint? -//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint? -//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties? -//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint? -//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint? - -// Namespace -//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces? -//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace? -//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace? -//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace? -//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties? -//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace? -//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace? 
- -// LoadBalancer -//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers? -//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer? -//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer? -//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer? -//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties? -//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? -//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? - -// SDN Routes -//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? -//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? -//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? -//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? -//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? -//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? -//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? - -type _guid = guid.GUID - -type hcnNetwork syscall.Handle -type hcnEndpoint syscall.Handle -type hcnNamespace syscall.Handle -type hcnLoadBalancer syscall.Handle -type hcnRoute syscall.Handle - -// SchemaVersion for HCN Objects/Queries. -type SchemaVersion = Version // hcnglobals.go - -// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which -// properties of an object are returned. -type HostComputeQueryFlags uint32 - -var ( - // HostComputeQueryFlagsNone returns an object with the standard properties. - HostComputeQueryFlagsNone HostComputeQueryFlags - // HostComputeQueryFlagsDetailed returns an object with all properties. - HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1 -) - -// HostComputeQuery is the format for HCN queries. -type HostComputeQuery struct { - SchemaVersion SchemaVersion `json:""` - Flags HostComputeQueryFlags `json:",omitempty"` - Filter string `json:",omitempty"` -} - -type ExtraParams struct { - Resources json.RawMessage `json:",omitempty"` - SharedContainers json.RawMessage `json:",omitempty"` - LayeredOn string `json:",omitempty"` - SwitchGuid string `json:",omitempty"` - UtilityVM string `json:",omitempty"` - VirtualMachine string `json:",omitempty"` -} - -type Health struct { - Data interface{} `json:",omitempty"` - Extra ExtraParams `json:",omitempty"` -} - -// defaultQuery generates HCN Query. -// Passed into get/enumerate calls to filter results. 
-func defaultQuery() HostComputeQuery { - query := HostComputeQuery{ - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: HostComputeQueryFlagsNone, - } - return query -} - -// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS -func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("platform does not support feature %s", featureName) -} - -// V2ApiSupported returns an error if the HCN version does not support the V2 Apis. -func V2ApiSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Api.V2 { - return nil - } - return platformDoesNotSupportError("V2 Api/Schema") -} - -func V2SchemaVersion() SchemaVersion { - return SchemaVersion{ - Major: 2, - Minor: 0, - } -} - -// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. -func RemoteSubnetSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.RemoteSubnet { - return nil - } - return platformDoesNotSupportError("Remote Subnet") -} - -// HostRouteSupported returns an error if the HCN version does not support Host Route policies. -func HostRouteSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.HostRoute { - return nil - } - return platformDoesNotSupportError("Host Route") -} - -// DSRSupported returns an error if the HCN version does not support Direct Server Return. -func DSRSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.DSR { - return nil - } - return platformDoesNotSupportError("Direct Server Return (DSR)") -} - -// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. -func Slash32EndpointPrefixesSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Slash32EndpointPrefixes { - return nil - } - return platformDoesNotSupportError("Slash 32 Endpoint prefixes") -} - -// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. -func AclSupportForProtocol252Supported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.AclSupportForProtocol252 { - return nil - } - return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") -} - -// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. -func SessionAffinitySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SessionAffinity { - return nil - } - return platformDoesNotSupportError("Session Affinity") -} - -// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. 
-func IPv6DualStackSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.IPv6DualStack { - return nil - } - return platformDoesNotSupportError("IPv6 DualStack") -} - -//L4proxySupported returns an error if the HCN version does not support L4Proxy -func L4proxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4Proxy { - return nil - } - return platformDoesNotSupportError("L4ProxyPolicy") -} - -// L4WfpProxySupported returns an error if the HCN version does not support L4WfpProxy -func L4WfpProxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4WfpProxy { - return nil - } - return platformDoesNotSupportError("L4WfpProxyPolicy") -} - -// SetPolicySupported returns an error if the HCN version does not support SetPolicy. -func SetPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SetPolicy { - return nil - } - return platformDoesNotSupportError("SetPolicy") -} - -// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. -func VxlanPortSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.VxlanPort { - return nil - } - return platformDoesNotSupportError("VXLAN port configuration") -} - -// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. -func TierAclPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.TierAcl { - return nil - } - return platformDoesNotSupportError("TierAcl") -} - -// NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy -func NetworkACLPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.NetworkACL { - return nil - } - return platformDoesNotSupportError("NetworkACL") -} - -// NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet -func NestedIpSetSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.NestedIpSet { - return nil - } - return platformDoesNotSupportError("NestedIpSet") -} - -// RequestType are the different operations performed to settings. -// Used to update the settings of Endpoint/Namespace objects. -type RequestType string - -var ( - // RequestTypeAdd adds the provided settings object. - RequestTypeAdd RequestType = "Add" - // RequestTypeRemove removes the provided settings object. - RequestTypeRemove RequestType = "Remove" - // RequestTypeUpdate replaces settings with the ones provided. - RequestTypeUpdate RequestType = "Update" - // RequestTypeRefresh refreshes the settings provided. 
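// Illustrative usage sketch of the capability helpers above (Windows-only,
// like the rest of the hcn package): gate newer HCN features on the
// *Supported checks before using them.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// The V2 API/schema is the baseline for the rest of this package.
	if err := hcn.V2ApiSupported(); err != nil {
		fmt.Println("HCN V2 API not available:", err)
		return
	}

	// Feature-specific helpers return a "platform does not support" error on
	// older HNS versions, which doubles as a descriptive log message.
	if err := hcn.DSRSupported(); err != nil {
		fmt.Println("skipping Direct Server Return:", err)
	}
	if err := hcn.SetPolicySupported(); err != nil {
		fmt.Println("skipping SetPolicy:", err)
	}
}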
- RequestTypeRefresh RequestType = "Refresh" -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go deleted file mode 100644 index 267bbe7cb1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ /dev/null @@ -1,390 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// IpConfig is associated with an endpoint -type IpConfig struct { - IpAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` -} - -// EndpointFlags are special settings on an endpoint. -type EndpointFlags uint32 - -var ( - // EndpointFlagsNone is the default. - EndpointFlagsNone EndpointFlags - // EndpointFlagsRemoteEndpoint means that an endpoint is on another host. - EndpointFlagsRemoteEndpoint EndpointFlags = 1 -) - -// HostComputeEndpoint represents a network endpoint -type HostComputeEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - HostComputeNetwork string `json:",omitempty"` // GUID - HostComputeNamespace string `json:",omitempty"` // GUID - Policies []EndpointPolicy `json:",omitempty"` - IpConfigurations []IpConfig `json:",omitempty"` - Dns Dns `json:",omitempty"` - Routes []Route `json:",omitempty"` - MacAddress string `json:",omitempty"` - Flags EndpointFlags `json:",omitempty"` - Health Health `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// EndpointResourceType are the two different Endpoint settings resources. -type EndpointResourceType string - -var ( - // EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT - EndpointResourceTypePolicy EndpointResourceType = "Policy" - // EndpointResourceTypePort is for Endpoint Port settings. - EndpointResourceTypePort EndpointResourceType = "Port" -) - -// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint. -// Used to update policy/port on an endpoint. -type ModifyEndpointSettingRequest struct { - ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -// VmEndpointRequest creates a switch port with identifier `PortId`. -type VmEndpointRequest struct { - PortId guid.GUID `json:",omitempty"` - VirtualNicName string `json:",omitempty"` - VirtualMachineId guid.GUID `json:",omitempty"` -} - -type PolicyEndpointRequest struct { - Policies []EndpointPolicy `json:",omitempty"` -} - -func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { - // Open endpoint. - var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. 
- hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { - // Enumerate all Endpoint Guids - var ( - resultBuffer *uint16 - endpointBuffer *uint16 - ) - hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil { - return nil, err - } - - endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer) - var endpointIds []guid.GUID - err := json.Unmarshal([]byte(endpoints), &endpointIds) - if err != nil { - return nil, err - } - - var outputEndpoints []HostComputeEndpoint - for _, endpointGuid := range endpointIds { - endpoint, err := getEndpoint(endpointGuid, query) - if err != nil { - return nil, err - } - outputEndpoints = append(outputEndpoints, *endpoint) - } - return outputEndpoints, nil -} - -func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open network. - var networkHandle hcnNetwork - var resultBuffer *uint16 - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Create endpoint. - endpointId := guid.GUID{} - var endpointHandle hcnEndpoint - hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - var propertiesBuffer *uint16 - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return nil, errInvalidEndpointID - } - // Open endpoint - var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Modify endpoint - hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func deleteEndpoint(endpointId string) error { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return errInvalidEndpointID - } - var resultBuffer *uint16 - hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListEndpoints makes a call to list all available endpoints. -func ListEndpoints() ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsQuery makes a call to query the list of available endpoints. -func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - endpoints, err := enumerateEndpoints(string(queryJson)) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsOfNetwork queries the list of endpoints on a network. -func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} - mapA := map[string]string{"VirtualNetwork": networkId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - return ListEndpointsQuery(hcnQuery) -} - -// GetEndpointByID returns an endpoint specified by Id -func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": endpointId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointID: endpointId} - } - return &endpoints[0], err -} - -// GetEndpointByName returns an endpoint specified by Name -func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": endpointName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointName: endpointName} - } - return &endpoints[0], err -} - -// Create Endpoint. 
-func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { - logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) - - if endpoint.HostComputeNamespace != "" { - return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") - } - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString) - endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return endpoint, nil -} - -// Delete Endpoint. -func (endpoint *HostComputeEndpoint) Delete() error { - logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id) - - if err := deleteEndpoint(endpoint.Id); err != nil { - return err - } - return nil -} - -// ModifyEndpointSettings updates the Port/Policy of an Endpoint. -func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) - - endpointSettingsRequest, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) - if err != nil { - return err - } - return nil -} - -// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. -func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) - - settingsJson, err := json.Marshal(endpointPolicy) - if err != nil { - return err - } - requestMessage := &ModifyEndpointSettingRequest{ - ResourceType: EndpointResourceTypePolicy, - RequestType: requestType, - Settings: settingsJson, - } - - return ModifyEndpointSettings(endpoint.Id, requestMessage) -} - -// NamespaceAttach modifies a Namespace to add an endpoint. -func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { - return AddNamespaceEndpoint(namespaceId, endpoint.Id) -} - -// NamespaceDetach modifies a Namespace to remove an endpoint. 
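// Illustrative usage sketch of the HostComputeEndpoint lifecycle above. The
// endpoint name and network GUID are placeholders; a real caller would first
// look up an existing HostComputeNetwork.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	endpoint := &hcn.HostComputeEndpoint{
		Name:               "example-endpoint",                     // hypothetical name
		HostComputeNetwork: "00000000-0000-0000-0000-000000000000", // placeholder network GUID
		SchemaVersion:      hcn.V2SchemaVersion(),
	}

	created, err := endpoint.Create()
	if err != nil {
		fmt.Println("endpoint create failed:", err)
		return
	}
	fmt.Println("created endpoint", created.Id)

	// Endpoints can later be looked up by name and deleted.
	if found, err := hcn.GetEndpointByName("example-endpoint"); err == nil {
		_ = found.Delete()
	}
}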
-func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { - return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go deleted file mode 100644 index 8b719fa112..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go +++ /dev/null @@ -1,164 +0,0 @@ -//go:build windows - -package hcn - -import ( - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -var ( - errInvalidNetworkID = errors.New("invalid network ID") - errInvalidEndpointID = errors.New("invalid endpoint ID") - errInvalidNamespaceID = errors.New("invalid namespace ID") - errInvalidLoadBalancerID = errors.New("invalid load balancer ID") - errInvalidRouteID = errors.New("invalid route ID") -) - -func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { - errorFound := false - - if hr != nil { - errorFound = true - } - - result := "" - if resultBuffer != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultBuffer) - if result != "" { - errorFound = true - } - } - - if errorFound { - returnError := new(hr, methodName, result) - logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. - return returnError - } - - return nil -} - -type ErrorCode uint32 - -// For common errors, define the error as it is in windows, so we can quickly determine it later -const ( - ERROR_NOT_FOUND = 0x490 - HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 -) - -type HcnError struct { - *hcserror.HcsError - code ErrorCode -} - -func (e *HcnError) Error() string { - return e.HcsError.Error() -} - -func CheckErrorWithCode(err error, code ErrorCode) bool { - hcnError, ok := err.(*HcnError) - if ok { - return hcnError.code == code - } - return false -} - -func IsElementNotFoundError(err error) bool { - return CheckErrorWithCode(err, ERROR_NOT_FOUND) -} - -func IsPortAlreadyExistsError(err error) bool { - return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) -} - -func new(hr error, title string, rest string) error { - err := &HcnError{} - hcsError := hcserror.New(hr, title, rest) - err.HcsError = hcsError.(*hcserror.HcsError) - err.code = ErrorCode(hcserror.Win32FromError(hr)) - return err -} - -// -// Note that the below errors are not errors returned by hcn itself -// we wish to separate them as they are shim usage error -// - -// NetworkNotFoundError results from a failed search for a network by Id or Name -type NetworkNotFoundError struct { - NetworkName string - NetworkID string -} - -func (e NetworkNotFoundError) Error() string { - if e.NetworkName != "" { - return fmt.Sprintf("Network name %q not found", e.NetworkName) - } - return fmt.Sprintf("Network ID %q not found", e.NetworkID) -} - -// EndpointNotFoundError results from a failed search for an endpoint by Id or Name -type EndpointNotFoundError struct { - EndpointName string - EndpointID string -} - -func (e EndpointNotFoundError) Error() string { - if e.EndpointName != "" { - return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) - } - return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) -} - -// NamespaceNotFoundError results from a failed search for a namsepace by Id -type NamespaceNotFoundError struct { - NamespaceID string -} - -func (e NamespaceNotFoundError) Error() string { - return fmt.Sprintf("Namespace ID 
%q not found", e.NamespaceID) -} - -// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id -type LoadBalancerNotFoundError struct { - LoadBalancerId string -} - -func (e LoadBalancerNotFoundError) Error() string { - return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) -} - -// RouteNotFoundError results from a failed search for a route by Id -type RouteNotFoundError struct { - RouteId string -} - -func (e RouteNotFoundError) Error() string { - return fmt.Sprintf("SDN Route %q not found", e.RouteId) -} - -// IsNotFoundError returns a boolean indicating whether the error was caused by -// a resource not being found. -func IsNotFoundError(err error) bool { - switch pe := err.(type) { - case NetworkNotFoundError: - return true - case EndpointNotFoundError: - return true - case NamespaceNotFoundError: - return true - case LoadBalancerNotFoundError: - return true - case RouteNotFoundError: - return true - case *hcserror.HcsError: - return pe.Err == hcs.ErrElementNotFound - } - return false -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go deleted file mode 100644 index 25e368fc23..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ /dev/null @@ -1,140 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "fmt" - "math" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// Globals are all global properties of the HCN Service. -type Globals struct { - Version Version `json:"Version"` -} - -// Version is the HCN Service version. -type Version struct { - Major int `json:"Major"` - Minor int `json:"Minor"` -} - -type VersionRange struct { - MinVersion Version - MaxVersion Version -} - -type VersionRanges []VersionRange - -var ( - // HNSVersion1803 added ACL functionality. - HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // V2ApiSupport allows the use of V2 Api calls and V2 Schema. 
- V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // Remote Subnet allows for Remote Subnet policies on Overlay networks - RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // A Host Route policy allows for local container to local host communication Overlay networks - HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing - DSRVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes - Slash32EndpointPrefixesVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN - AclSupportForProtocol252Version = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 12.0 allows for session affinity for loadbalancing - SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 11.10+ supports Ipv6 dual stack. 
- IPv6DualStackVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 13.0 allows for Set Policy support - SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 10.3 allows for VXLAN ports - VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support - L4ProxyPolicyVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - - //HNS 13.2 allows for L4WfpProxy Policy support - L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 14.0 allows for TierAcl Policy support - TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 15.0 allows for NetworkACL Policy support - NetworkACLPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 15.0 allows for NestedIpSet support - NestedIpSetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} -) - -// GetGlobals returns the global properties of the HCN Service. 
-func GetGlobals() (*Globals, error) { - var version Version - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &Globals{ - Version: version, - } - - return globals, nil -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return hcserror.New(err, "hnsCall", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return err - } - - if !hnsresponse.Success { - return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go deleted file mode 100644 index f68d39053e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go +++ /dev/null @@ -1,313 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// LoadBalancerPortMapping is associated with HostComputeLoadBalancer -type LoadBalancerPortMapping struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 - Flags LoadBalancerPortMappingFlags `json:",omitempty"` -} - -// HostComputeLoadBalancer represents software load balancer. -type HostComputeLoadBalancer struct { - Id string `json:"ID,omitempty"` - HostComputeEndpoints []string `json:",omitempty"` - SourceVIP string `json:",omitempty"` - FrontendVIPs []string `json:",omitempty"` - PortMappings []LoadBalancerPortMapping `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` - Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn -} - -//LoadBalancerFlags modify settings for a loadbalancer. -type LoadBalancerFlags uint32 - -var ( - // LoadBalancerFlagsNone is the default. - LoadBalancerFlagsNone LoadBalancerFlags = 0 - // LoadBalancerFlagsDSR enables Direct Server Return (DSR) - LoadBalancerFlagsDSR LoadBalancerFlags = 1 - LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 -) - -// LoadBalancerPortMappingFlags are special settings on a loadbalancer. -type LoadBalancerPortMappingFlags uint32 - -var ( - // LoadBalancerPortMappingFlagsNone is the default. - LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags - // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. - LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 - // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. 
- LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 - // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. - LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 - // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. - LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 -) - -// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. -type LoadBalancerDistribution uint32 - -var ( - // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. - LoadBalancerDistributionNone LoadBalancerDistribution - // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. - LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 - // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. - LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 -) - -func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { - // Open loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { - // Enumerate all LoadBalancer Guids - var ( - resultBuffer *uint16 - loadBalancerBuffer *uint16 - ) - hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { - return nil, err - } - - loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) - var loadBalancerIds []guid.GUID - if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { - return nil, err - } - - var outputLoadBalancers []HostComputeLoadBalancer - for _, loadBalancerGuid := range loadBalancerIds { - loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) - if err != nil { - return nil, err - } - outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) - } - return outputLoadBalancers, nil -} - -func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { - // Create new loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - loadBalancerGuid := guid.GUID{} - hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func deleteLoadBalancer(loadBalancerId string) error { - loadBalancerGuid, err := guid.FromString(loadBalancerId) - if err != nil { - return errInvalidLoadBalancerID - } - var resultBuffer *uint16 - hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListLoadBalancers makes a call to list all available loadBalancers. -func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. -func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - loadBalancers, err := enumerateLoadBalancers(string(queryJson)) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// GetLoadBalancerByID returns the LoadBalancer specified by Id. -func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": loadBalancerId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(loadBalancers) == 0 { - return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} - } - return &loadBalancers[0], err -} - -// Create LoadBalancer. -func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) - - jsonString, err := json.Marshal(loadBalancer) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) - loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return loadBalancer, nil -} - -// Delete LoadBalancer. 
-func (loadBalancer *HostComputeLoadBalancer) Delete() error { - logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) - - if err := deleteLoadBalancer(loadBalancer.Id); err != nil { - return err - } - return nil -} - -// AddEndpoint add an endpoint to a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - - return loadBalancer.Create() -} - -// RemoveEndpoint removes an endpoint from a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - var endpoints []string - for _, endpointReference := range loadBalancer.HostComputeEndpoints { - if endpointReference == endpoint.Id { - continue - } - endpoints = append(endpoints, endpointReference) - } - loadBalancer.HostComputeEndpoints = endpoints - return loadBalancer.Create() -} - -// AddLoadBalancer for the specified endpoints -func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) - - loadBalancer := &HostComputeLoadBalancer{ - SourceVIP: sourceVIP, - PortMappings: []LoadBalancerPortMapping{ - { - Protocol: uint32(protocol), - InternalPort: internalPort, - ExternalPort: externalPort, - Flags: portMappingFlags, - }, - }, - FrontendVIPs: frontendVIPs, - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: flags, - } - - for _, endpoint := range endpoints { - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - } - - return loadBalancer.Create() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go deleted file mode 100644 index 44ba2fa1fd..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go +++ /dev/null @@ -1,448 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "os" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - icni "github.com/Microsoft/hcsshim/internal/cni" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/regstate" - "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/sirupsen/logrus" -) - -// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. 
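// Illustrative usage sketch of the AddLoadBalancer convenience wrapper above.
// The endpoint name, VIPs and ports are placeholders.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// Hypothetical backend endpoint created elsewhere.
	ep, err := hcn.GetEndpointByName("example-endpoint")
	if err != nil {
		fmt.Println("endpoint lookup failed:", err)
		return
	}

	lb, err := hcn.AddLoadBalancer(
		[]hcn.HostComputeEndpoint{*ep},
		hcn.LoadBalancerFlagsDSR,             // enable Direct Server Return
		hcn.LoadBalancerPortMappingFlagsNone, // no ILB or DIP-preservation flags
		"10.0.0.10",                          // placeholder source VIP
		[]string{"10.0.0.100"},               // placeholder frontend VIP
		6,                                    // protocol: TCP
		80,                                   // internal (backend) port
		8080,                                 // external (frontend) port
	)
	if err != nil {
		fmt.Println("load balancer create failed:", err)
		return
	}
	fmt.Println("created load balancer", lb.Id)
}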
-type NamespaceResourceEndpoint struct { - Id string `json:"ID,"` -} - -// NamespaceResourceContainer represents a Container attached to a Namespace. -type NamespaceResourceContainer struct { - Id string `json:"ID,"` -} - -// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint. -type NamespaceResourceType string - -var ( - // NamespaceResourceTypeContainer are containers associated with a Namespace. - NamespaceResourceTypeContainer NamespaceResourceType = "Container" - // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. - NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint" -) - -// NamespaceResource is associated with a namespace -type NamespaceResource struct { - Type NamespaceResourceType `json:","` // Container, Endpoint - Data json.RawMessage `json:","` -} - -// NamespaceType determines whether the Namespace is for a Host or Guest -type NamespaceType string - -var ( - // NamespaceTypeHost are host namespaces. - NamespaceTypeHost NamespaceType = "Host" - // NamespaceTypeHostDefault are host namespaces in the default compartment. - NamespaceTypeHostDefault NamespaceType = "HostDefault" - // NamespaceTypeGuest are guest namespaces. - NamespaceTypeGuest NamespaceType = "Guest" - // NamespaceTypeGuestDefault are guest namespaces in the default compartment. - NamespaceTypeGuestDefault NamespaceType = "GuestDefault" -) - -// HostComputeNamespace represents a namespace (AKA compartment) in -type HostComputeNamespace struct { - Id string `json:"ID,omitempty"` - NamespaceId uint32 `json:",omitempty"` - Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault - Resources []NamespaceResource `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// ModifyNamespaceSettingRequest is the structure used to send request to modify a namespace. -// Used to Add/Remove an endpoints and containers to/from a namespace. -type ModifyNamespaceSettingRequest struct { - ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) { - // Open namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. 
- hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { - // Enumerate all Namespace Guids - var ( - resultBuffer *uint16 - namespaceBuffer *uint16 - ) - hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { - return nil, err - } - - namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) - var namespaceIds []guid.GUID - if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { - return nil, err - } - - var outputNamespaces []HostComputeNamespace - for _, namespaceGuid := range namespaceIds { - namespace, err := getNamespace(namespaceGuid, query) - if err != nil { - return nil, err - } - outputNamespaces = append(outputNamespaces, *namespace) - } - return outputNamespaces, nil -} - -func createNamespace(settings string) (*HostComputeNamespace, error) { - // Create new namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - namespaceGuid := guid.GUID{} - hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. - hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { - namespaceGuid, err := guid.FromString(namespaceId) - if err != nil { - return nil, errInvalidNamespaceID - } - // Open namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Modify namespace. - hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. 
- hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to Namespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func deleteNamespace(namespaceId string) error { - namespaceGuid, err := guid.FromString(namespaceId) - if err != nil { - return errInvalidNamespaceID - } - var resultBuffer *uint16 - hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListNamespaces makes a call to list all available namespaces. -func ListNamespaces() ([]HostComputeNamespace, error) { - hcnQuery := defaultQuery() - namespaces, err := ListNamespacesQuery(hcnQuery) - if err != nil { - return nil, err - } - return namespaces, nil -} - -// ListNamespacesQuery makes a call to query the list of available namespaces. -func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - namespaces, err := enumerateNamespaces(string(queryJson)) - if err != nil { - return nil, err - } - return namespaces, nil -} - -// GetNamespaceByID returns the Namespace specified by Id. -func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": namespaceId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - namespaces, err := ListNamespacesQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(namespaces) == 0 { - return nil, NamespaceNotFoundError{NamespaceID: namespaceId} - } - - return &namespaces[0], err -} - -// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. -func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) - if err != nil { - return nil, err - } - var endpointsIds []string - for _, resource := range namespace.Resources { - if resource.Type == "Endpoint" { - var endpointResource NamespaceResourceEndpoint - if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil { - return nil, err - } - endpointsIds = append(endpointsIds, endpointResource.Id) - } - } - return endpointsIds, nil -} - -// GetNamespaceContainerIds returns the containers of the Namespace specified by Id. -func GetNamespaceContainerIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) - if err != nil { - return nil, err - } - var containerIds []string - for _, resource := range namespace.Resources { - if resource.Type == "Container" { - var containerResource NamespaceResourceContainer - if err := json.Unmarshal([]byte(resource.Data), &containerResource); err != nil { - return nil, err - } - containerIds = append(containerIds, containerResource.Id) - } - } - return containerIds, nil -} - -// NewNamespace creates a new Namespace object -func NewNamespace(nsType NamespaceType) *HostComputeNamespace { - return &HostComputeNamespace{ - Type: nsType, - SchemaVersion: V2SchemaVersion(), - } -} - -// Create Namespace. 
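// A small sketch of the namespace query helpers above: look up a namespace by
// its GUID string and list the endpoint IDs attached to it. The namespace ID
// is a caller-supplied placeholder.
// Assumes: import ("fmt"; "github.com/Microsoft/hcsshim/hcn")
func exampleListNamespaceEndpoints(namespaceID string) error {
	ns, err := hcn.GetNamespaceByID(namespaceID)
	if err != nil {
		return err
	}
	endpointIDs, err := hcn.GetNamespaceEndpointIds(ns.Id)
	if err != nil {
		return err
	}
	fmt.Printf("namespace %s (type %s) has endpoints %v\n", ns.Id, ns.Type, endpointIDs)
	return nil
}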
-func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) { - logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id) - - jsonString, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString) - namespace, hcnErr := createNamespace(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return namespace, nil -} - -// Delete Namespace. -func (namespace *HostComputeNamespace) Delete() error { - logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id) - - if err := deleteNamespace(namespace.Id); err != nil { - return err - } - return nil -} - -// Sync Namespace endpoints with the appropriate sandbox container holding the -// network namespace open. If no sandbox container is found for this namespace -// this method is determined to be a success and will not return an error in -// this case. If the sandbox container is found and a sync is initiated any -// failures will be returned via this method. -// -// This call initiates a sync between endpoints and the matching UtilityVM -// hosting those endpoints. It is safe to call for any `NamespaceType` but -// `NamespaceTypeGuest` is the only case when a sync will actually occur. For -// `NamespaceTypeHost` the process container will be automatically synchronized -// when the the endpoint is added via `AddNamespaceEndpoint`. -// -// Note: This method sync's both additions and removals of endpoints from a -// `NamespaceTypeGuest` namespace. -func (namespace *HostComputeNamespace) Sync() error { - logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync") - - // We only attempt a sync for namespace guest. - if namespace.Type != NamespaceTypeGuest { - return nil - } - - // Look in the registry for the key to map from namespace id to pod-id - cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id) - if err != nil { - if regstate.IsNotFoundError(err) { - return nil - } - return err - } - req := runhcs.VMRequest{ - ID: cfg.ContainerID, - Op: runhcs.OpSyncNamespace, - } - shimPath := runhcs.VMPipePath(cfg.HostUniqueID) - if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { - // The shim is likely gone. Simply ignore the sync as if it didn't exist. - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { - // Remove the reg key there is no point to try again - _ = cfg.Remove() - return nil - } - f := map[string]interface{}{ - "id": namespace.Id, - "container-id": cfg.ContainerID, - } - logrus.WithFields(f). - WithError(err). - Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath) - return err - } - return nil -} - -// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. -func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { - logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) - - namespaceSettings, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyNamespace(namespaceId, string(namespaceSettings)) - if err != nil { - return err - } - return nil -} - -// AddNamespaceEndpoint adds an endpoint to a Namespace. 
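// A minimal lifecycle sketch for the HostComputeNamespace methods above:
// create a guest namespace, attach an existing endpoint by ID with the
// AddNamespaceEndpoint helper from this file, sync it to the UtilityVM, and
// clean up. endpointID is assumed to name an existing HCN endpoint.
// Assumes: import "github.com/Microsoft/hcsshim/hcn"
func exampleNamespaceLifecycle(endpointID string) error {
	ns, err := hcn.NewNamespace(hcn.NamespaceTypeGuest).Create()
	if err != nil {
		return err
	}
	defer func() { _ = ns.Delete() }()

	if err := hcn.AddNamespaceEndpoint(ns.Id, endpointID); err != nil {
		return err
	}
	// Sync only does real work for NamespaceTypeGuest; for other types it returns nil.
	return ns.Sync()
}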
-func AddNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) - - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) - if err != nil { - return err - } - requestMessage := &ModifyNamespaceSettingRequest{ - ResourceType: NamespaceResourceTypeEndpoint, - RequestType: RequestTypeAdd, - Settings: settingsJson, - } - - return ModifyNamespaceSettings(namespaceId, requestMessage) -} - -// RemoveNamespaceEndpoint removes an endpoint from a Namespace. -func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) - - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) - if err != nil { - return err - } - requestMessage := &ModifyNamespaceSettingRequest{ - ResourceType: NamespaceResourceTypeEndpoint, - RequestType: RequestTypeRemove, - Settings: settingsJson, - } - - return ModifyNamespaceSettings(namespaceId, requestMessage) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go deleted file mode 100644 index 41dcdac24a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go +++ /dev/null @@ -1,464 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// Route is associated with a subnet. -type Route struct { - NextHop string `json:",omitempty"` - DestinationPrefix string `json:",omitempty"` - Metric uint16 `json:",omitempty"` -} - -// Subnet is associated with a Ipam. -type Subnet struct { - IpAddressPrefix string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - Routes []Route `json:",omitempty"` -} - -// Ipam (Internet Protocol Address Management) is associated with a network -// and represents the address space(s) of a network. -type Ipam struct { - Type string `json:",omitempty"` // Ex: Static, DHCP - Subnets []Subnet `json:",omitempty"` -} - -// MacRange is associated with MacPool and respresents the start and end addresses. -type MacRange struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// MacPool is associated with a network and represents pool of MacRanges. -type MacPool struct { - Ranges []MacRange `json:",omitempty"` -} - -// Dns (Domain Name System is associated with a network). -type Dns struct { - Domain string `json:",omitempty"` - Search []string `json:",omitempty"` - ServerList []string `json:",omitempty"` - Options []string `json:",omitempty"` -} - -// NetworkType are various networks. -type NetworkType string - -// NetworkType const -const ( - NAT NetworkType = "NAT" - Transparent NetworkType = "Transparent" - L2Bridge NetworkType = "L2Bridge" - L2Tunnel NetworkType = "L2Tunnel" - ICS NetworkType = "ICS" - Private NetworkType = "Private" - Overlay NetworkType = "Overlay" -) - -// NetworkFlags are various network flags. 
-type NetworkFlags uint32 - -// NetworkFlags const -const ( - None NetworkFlags = 0 - EnableNonPersistent NetworkFlags = 8 -) - -// HostComputeNetwork represents a network -type HostComputeNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type NetworkType `json:",omitempty"` - Policies []NetworkPolicy `json:",omitempty"` - MacPool MacPool `json:",omitempty"` - Dns Dns `json:",omitempty"` - Ipams []Ipam `json:",omitempty"` - Flags NetworkFlags `json:",omitempty"` // 0: None - Health Health `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// NetworkResourceType are the 3 different Network settings resources. -type NetworkResourceType string - -var ( - // NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet - NetworkResourceTypePolicy NetworkResourceType = "Policy" - // NetworkResourceTypeDNS is for Network's DNS settings. - NetworkResourceTypeDNS NetworkResourceType = "DNS" - // NetworkResourceTypeExtension is for Network's extension settings. - NetworkResourceTypeExtension NetworkResourceType = "Extension" -) - -// ModifyNetworkSettingRequest is the structure used to send request to modify an network. -// Used to update DNS/extension/policy on an network. -type ModifyNetworkSettingRequest struct { - ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -type PolicyNetworkRequest struct { - Policies []NetworkPolicy `json:",omitempty"` -} - -func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) { - // Open network. - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. 
- outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func enumerateNetworks(query string) ([]HostComputeNetwork, error) { - // Enumerate all Network Guids - var ( - resultBuffer *uint16 - networkBuffer *uint16 - ) - hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { - return nil, err - } - - networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) - var networkIds []guid.GUID - if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { - return nil, err - } - - var outputNetworks []HostComputeNetwork - for _, networkGuid := range networkIds { - network, err := getNetwork(networkGuid, query) - if err != nil { - return nil, err - } - outputNetworks = append(outputNetworks, *network) - } - return outputNetworks, nil -} - -func createNetwork(settings string) (*HostComputeNetwork, error) { - // Create new network. - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - networkGuid := guid.GUID{} - hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open Network - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Modify Network - hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. 
- hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func deleteNetwork(networkId string) error { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return errInvalidNetworkID - } - var resultBuffer *uint16 - hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListNetworks makes a call to list all available networks. -func ListNetworks() ([]HostComputeNetwork, error) { - hcnQuery := defaultQuery() - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - return networks, nil -} - -// ListNetworksQuery makes a call to query the list of available networks. -func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - networks, err := enumerateNetworks(string(queryJson)) - if err != nil { - return nil, err - } - return networks, nil -} - -// GetNetworkByID returns the network specified by Id. -func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": networkID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkID: networkID} - } - return &networks[0], err -} - -// GetNetworkByName returns the network specified by Name. -func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": networkName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkName: networkName} - } - return &networks[0], err -} - -// Create Network. 
-func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { - logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) - for _, ipam := range network.Ipams { - for _, subnet := range ipam.Subnets { - if subnet.IpAddressPrefix != "" { - hasDefault := false - for _, route := range subnet.Routes { - if route.NextHop == "" { - return nil, errors.New("network create error, subnet has address prefix but no gateway specified") - } - if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { - hasDefault = true - } - } - if !hasDefault { - return nil, errors.New("network create error, no default gateway") - } - } - } - } - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString) - network, hcnErr := createNetwork(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return network, nil -} - -// Delete Network. -func (network *HostComputeNetwork) Delete() error { - logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id) - - if err := deleteNetwork(network.Id); err != nil { - return err - } - return nil -} - -// ModifyNetworkSettings updates the Policy for a network. -func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id) - - networkSettingsRequest, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyNetwork(network.Id, string(networkSettingsRequest)) - if err != nil { - return err - } - return nil -} - -// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network. -func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) - - settingsJson, err := json.Marshal(networkPolicy) - if err != nil { - return err - } - requestMessage := &ModifyNetworkSettingRequest{ - ResourceType: NetworkResourceTypePolicy, - RequestType: RequestTypeAdd, - Settings: settingsJson, - } - - return network.ModifyNetworkSettings(requestMessage) -} - -// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network. -func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { - logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) - - settingsJson, err := json.Marshal(networkPolicy) - if err != nil { - return err - } - requestMessage := &ModifyNetworkSettingRequest{ - ResourceType: NetworkResourceTypePolicy, - RequestType: RequestTypeRemove, - Settings: settingsJson, - } - - return network.ModifyNetworkSettings(requestMessage) -} - -// CreateEndpoint creates an endpoint on the Network. -func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { - isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0 - logrus.Debugf("hcn::HostComputeNetwork::CreatEndpoint, networkId=%s remote=%t", network.Id, isRemote) - - endpoint.HostComputeNetwork = network.Id - endpointSettings, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - newEndpoint, err := createEndpoint(network.Id, string(endpointSettings)) - if err != nil { - return nil, err - } - return newEndpoint, nil -} - -// CreateRemoteEndpoint creates a remote endpoint on the Network. 
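// A sketch of HostComputeNetwork.Create as validated above: because the subnet
// carries an address prefix, every route must name a gateway and at least one
// default route ("0.0.0.0/0" or "::/0") must be present. The prefix, gateway,
// and names are placeholders, and the endpoint literal assumes
// HostComputeEndpoint exposes Name and SchemaVersion fields like the other
// HCN objects in this package.
// Assumes: import "github.com/Microsoft/hcsshim/hcn"
func exampleCreateNetworkWithEndpoint() (*hcn.HostComputeNetwork, *hcn.HostComputeEndpoint, error) {
	network := &hcn.HostComputeNetwork{
		Name: "example-nat",
		Type: hcn.NAT,
		Ipams: []hcn.Ipam{{
			Type: "Static",
			Subnets: []hcn.Subnet{{
				IpAddressPrefix: "192.168.100.0/24",
				Routes: []hcn.Route{{
					NextHop:           "192.168.100.1",
					DestinationPrefix: "0.0.0.0/0",
				}},
			}},
		}},
		SchemaVersion: hcn.V2SchemaVersion(),
	}

	created, err := network.Create()
	if err != nil {
		return nil, nil, err
	}

	endpoint, err := created.CreateEndpoint(&hcn.HostComputeEndpoint{
		Name:          "example-endpoint",
		SchemaVersion: hcn.V2SchemaVersion(),
	})
	if err != nil {
		return created, nil, err
	}
	return created, endpoint, nil
}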
-func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { - endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags - return network.CreateEndpoint(endpoint) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go deleted file mode 100644 index a695f1c27d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go +++ /dev/null @@ -1,346 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" -) - -// EndpointPolicyType are the potential Policies that apply to Endpoints. -type EndpointPolicyType string - -// EndpointPolicyType const -const ( - PortMapping EndpointPolicyType = "PortMapping" - ACL EndpointPolicyType = "ACL" - QOS EndpointPolicyType = "QOS" - L2Driver EndpointPolicyType = "L2Driver" - OutBoundNAT EndpointPolicyType = "OutBoundNAT" - SDNRoute EndpointPolicyType = "SDNRoute" - L4Proxy EndpointPolicyType = "L4Proxy" - L4WFPPROXY EndpointPolicyType = "L4WFPPROXY" - PortName EndpointPolicyType = "PortName" - EncapOverhead EndpointPolicyType = "EncapOverhead" - IOV EndpointPolicyType = "Iov" - // Endpoint and Network have InterfaceConstraint and ProviderAddress - NetworkProviderAddress EndpointPolicyType = "ProviderAddress" - NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" - TierAcl EndpointPolicyType = "TierAcl" -) - -// EndpointPolicy is a collection of Policy settings for an Endpoint. -type EndpointPolicy struct { - Type EndpointPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// NetworkPolicyType are the potential Policies that apply to Networks. -type NetworkPolicyType string - -// NetworkPolicyType const -const ( - SourceMacAddress NetworkPolicyType = "SourceMacAddress" - NetAdapterName NetworkPolicyType = "NetAdapterName" - VSwitchExtension NetworkPolicyType = "VSwitchExtension" - DrMacAddress NetworkPolicyType = "DrMacAddress" - AutomaticDNS NetworkPolicyType = "AutomaticDNS" - InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" - ProviderAddress NetworkPolicyType = "ProviderAddress" - RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" - VxlanPort NetworkPolicyType = "VxlanPort" - HostRoute NetworkPolicyType = "HostRoute" - SetPolicy NetworkPolicyType = "SetPolicy" - NetworkL4Proxy NetworkPolicyType = "L4Proxy" - LayerConstraint NetworkPolicyType = "LayerConstraint" - NetworkACL NetworkPolicyType = "NetworkACL" -) - -// NetworkPolicy is a collection of Policy settings for a Network. -type NetworkPolicy struct { - Type NetworkPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// SubnetPolicyType are the potential Policies that apply to Subnets. -type SubnetPolicyType string - -// SubnetPolicyType const -const ( - VLAN SubnetPolicyType = "VLAN" - VSID SubnetPolicyType = "VSID" -) - -// SubnetPolicy is a collection of Policy settings for a Subnet. -type SubnetPolicy struct { - Type SubnetPolicyType `json:""` - Settings json.RawMessage `json:",omitempty"` -} - -// NatFlags are flags for portmappings. 
-type NatFlags uint32 - -const ( - NatFlagsNone NatFlags = iota - NatFlagsLocalRoutedVip - NatFlagsIPv6 -) - -/// Endpoint Policy objects - -// PortMappingPolicySetting defines Port Mapping (NAT) -type PortMappingPolicySetting struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - VIP string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// ActionType associated with ACLs. Value is either Allow or Block. -type ActionType string - -// DirectionType associated with ACLs. Value is either In or Out. -type DirectionType string - -// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). -type RuleType string - -const ( - // Allow traffic - ActionTypeAllow ActionType = "Allow" - // Block traffic - ActionTypeBlock ActionType = "Block" - // Pass traffic - ActionTypePass ActionType = "Pass" - - // In is traffic coming to the Endpoint - DirectionTypeIn DirectionType = "In" - // Out is traffic leaving the Endpoint - DirectionTypeOut DirectionType = "Out" - - // Host creates WFP (Windows Firewall) rules - RuleTypeHost RuleType = "Host" - // Switch creates VFP (Virtual Filter Platform) rules - RuleTypeSwitch RuleType = "Switch" -) - -// AclPolicySetting creates firewall rules on an endpoint -type AclPolicySetting struct { - Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) - Action ActionType `json:","` - Direction DirectionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - RuleType RuleType `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. -type QosPolicySetting struct { - MaximumOutgoingBandwidthInBytes uint64 -} - -// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. -type OutboundNatPolicySetting struct { - VirtualIP string `json:",omitempty"` - Exceptions []string `json:",omitempty"` - Destinations []string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// SDNRoutePolicySetting sets SDN Route on an Endpoint. -type SDNRoutePolicySetting struct { - DestinationPrefix string `json:",omitempty"` - NextHop string `json:",omitempty"` - NeedEncap bool `json:",omitempty"` -} - -// NetworkACLPolicySetting creates ACL rules on a network -type NetworkACLPolicySetting struct { - Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) - Action ActionType `json:","` - Direction DirectionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - RuleType RuleType `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// FiveTuple is nested in L4ProxyPolicySetting for WFP support. 
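// A sketch of building one of the endpoint policy payloads above: an
// AclPolicySetting is marshaled into the generic EndpointPolicy envelope
// (a Type plus raw JSON Settings). Attaching the policy to an endpoint is
// handled elsewhere and not shown here.
// Assumes: import ("encoding/json"; "github.com/Microsoft/hcsshim/hcn")
func exampleAllowInboundHTTP() (*hcn.EndpointPolicy, error) {
	acl := hcn.AclPolicySetting{
		Protocols:       "6", // TCP
		Action:          hcn.ActionTypeAllow,
		Direction:       hcn.DirectionTypeIn,
		RemoteAddresses: "10.0.0.0/8",
		LocalPorts:      "80",
		RuleType:        hcn.RuleTypeSwitch,
		Priority:        200,
	}
	settings, err := json.Marshal(acl)
	if err != nil {
		return nil, err
	}
	return &hcn.EndpointPolicy{
		Type:     hcn.ACL,
		Settings: settings,
	}, nil
}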
-type FiveTuple struct { - Protocols string `json:",omitempty"` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// ProxyExceptions exempts traffic to IpAddresses and Ports -type ProxyExceptions struct { - IpAddressExceptions []string `json:",omitempty"` - PortExceptions []string `json:",omitempty"` -} - -// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint. -type L4WfpProxyPolicySetting struct { - InboundProxyPort string `json:",omitempty"` - OutboundProxyPort string `json:",omitempty"` - FilterTuple FiveTuple `json:",omitempty"` - UserSID string `json:",omitempty"` - InboundExceptions ProxyExceptions `json:",omitempty"` - OutboundExceptions ProxyExceptions `json:",omitempty"` -} - -// PortnameEndpointPolicySetting sets the port name for an endpoint. -type PortnameEndpointPolicySetting struct { - Name string `json:",omitempty"` -} - -// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint. -type EncapOverheadEndpointPolicySetting struct { - Overhead uint16 `json:",omitempty"` -} - -// IovPolicySetting sets the Iov settings for an endpoint. -type IovPolicySetting struct { - IovOffloadWeight uint32 `json:",omitempty"` - QueuePairsRequested uint32 `json:",omitempty"` - InterruptModeration uint32 `json:",omitempty"` -} - -/// Endpoint and Network Policy objects - -// ProviderAddressEndpointPolicySetting sets the PA for an endpoint. -type ProviderAddressEndpointPolicySetting struct { - ProviderAddress string `json:",omitempty"` -} - -// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic. -type InterfaceConstraintPolicySetting struct { - InterfaceGuid string `json:",omitempty"` - InterfaceLuid uint64 `json:",omitempty"` - InterfaceIndex uint32 `json:",omitempty"` - InterfaceMediaType uint32 `json:",omitempty"` - InterfaceAlias string `json:",omitempty"` - InterfaceDescription string `json:",omitempty"` -} - -/// Network Policy objects - -// SourceMacAddressNetworkPolicySetting sets source MAC for a network. -type SourceMacAddressNetworkPolicySetting struct { - SourceMacAddress string `json:",omitempty"` -} - -// NetAdapterNameNetworkPolicySetting sets network adapter of a network. -type NetAdapterNameNetworkPolicySetting struct { - NetworkAdapterName string `json:",omitempty"` -} - -// VSwitchExtensionNetworkPolicySetting enables/disabled VSwitch extensions for a network. -type VSwitchExtensionNetworkPolicySetting struct { - ExtensionID string `json:",omitempty"` - Enable bool `json:",omitempty"` -} - -// DrMacAddressNetworkPolicySetting sets the DR MAC for a network. -type DrMacAddressNetworkPolicySetting struct { - Address string `json:",omitempty"` -} - -// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network. -type AutomaticDNSNetworkPolicySetting struct { - Enable bool `json:",omitempty"` -} - -type LayerConstraintNetworkPolicySetting struct { - LayerId string `json:",omitempty"` -} - -/// Subnet Policy objects - -// VlanPolicySetting isolates a subnet with VLAN tagging. -type VlanPolicySetting struct { - IsolationId uint32 `json:","` -} - -// VsidPolicySetting isolates a subnet with VSID tagging. 
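// A companion sketch for the network-level settings above: a
// NetAdapterNameNetworkPolicySetting wrapped in the NetworkPolicy envelope,
// which would typically be placed in HostComputeNetwork.Policies before the
// network is created. The adapter name is a placeholder.
// Assumes: import ("encoding/json"; "github.com/Microsoft/hcsshim/hcn")
func exampleNetAdapterPolicy() (*hcn.NetworkPolicy, error) {
	settings, err := json.Marshal(hcn.NetAdapterNameNetworkPolicySetting{
		NetworkAdapterName: "Ethernet 2",
	})
	if err != nil {
		return nil, err
	}
	return &hcn.NetworkPolicy{
		Type:     hcn.NetAdapterName,
		Settings: settings,
	}, nil
}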
-type VsidPolicySetting struct { - IsolationId uint32 `json:","` -} - -// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network -type RemoteSubnetRoutePolicySetting struct { - DestinationPrefix string - IsolationId uint16 - ProviderAddress string - DistributedRouterMacAddress string -} - -// SetPolicyTypes associated with SetPolicy. Value is IPSET. -type SetPolicyType string - -const ( - SetPolicyTypeIpSet SetPolicyType = "IPSET" - SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET" -) - -// SetPolicySetting creates IPSets on network -type SetPolicySetting struct { - Id string - Name string - Type SetPolicyType `json:"PolicyType"` - Values string -} - -// VxlanPortPolicySetting allows configuring the VXLAN TCP port -type VxlanPortPolicySetting struct { - Port uint16 -} - -// ProtocolType associated with L4ProxyPolicy -type ProtocolType uint32 - -const ( - ProtocolTypeUnknown ProtocolType = 0 - ProtocolTypeICMPv4 ProtocolType = 1 - ProtocolTypeIGMP ProtocolType = 2 - ProtocolTypeTCP ProtocolType = 6 - ProtocolTypeUDP ProtocolType = 17 - ProtocolTypeICMPv6 ProtocolType = 58 -) - -//L4ProxyPolicySetting applies proxy policy on network/endpoint -type L4ProxyPolicySetting struct { - IP string `json:",omitempty"` - Port string `json:",omitempty"` - Protocol ProtocolType `json:",omitempty"` - Exceptions []string `json:",omitempty"` - Destination string - OutboundNAT bool `json:",omitempty"` -} - -// TierAclRule represents an ACL within TierAclPolicySetting -type TierAclRule struct { - Id string `json:",omitempty"` - Protocols string `json:",omitempty"` - TierAclRuleAction ActionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// TierAclPolicySetting represents a Tier containing ACLs -type TierAclPolicySetting struct { - Name string `json:","` - Direction DirectionType `json:","` - Order uint16 `json:""` - TierAclRules []TierAclRule `json:",omitempty"` -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go deleted file mode 100644 index d0761d6bd0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go +++ /dev/null @@ -1,268 +0,0 @@ -//go:build windows - -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// HostComputeRoute represents SDN routes. -type HostComputeRoute struct { - ID string `json:"ID,omitempty"` - HostComputeEndpoints []string `json:",omitempty"` - Setting []SDNRoutePolicySetting `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// ListRoutes makes a call to list all available routes. -func ListRoutes() ([]HostComputeRoute, error) { - hcnQuery := defaultQuery() - routes, err := ListRoutesQuery(hcnQuery) - if err != nil { - return nil, err - } - return routes, nil -} - -// ListRoutesQuery makes a call to query the list of available routes. -func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) { - queryJSON, err := json.Marshal(query) - if err != nil { - return nil, err - } - - routes, err := enumerateRoutes(string(queryJSON)) - if err != nil { - return nil, err - } - return routes, nil -} - -// GetRouteByID returns the route specified by Id. 
-func GetRouteByID(routeID string) (*HostComputeRoute, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": routeID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - routes, err := ListRoutesQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(routes) == 0 { - return nil, RouteNotFoundError{RouteId: routeID} - } - return &routes[0], err -} - -// Create Route. -func (route *HostComputeRoute) Create() (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID) - - jsonString, err := json.Marshal(route) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString) - route, hcnErr := createRoute(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return route, nil -} - -// Delete Route. -func (route *HostComputeRoute) Delete() error { - logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID) - - existingRoute, _ := GetRouteByID(route.ID) - - if existingRoute != nil { - if err := deleteRoute(route.ID); err != nil { - return err - } - } - - return nil -} - -// AddEndpoint add an endpoint to a route -// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add -func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - - return route.Create() -} - -// RemoveEndpoint removes an endpoint from a route -// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add -func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - i := 0 - for index, endpointReference := range route.HostComputeEndpoints { - if endpointReference == endpoint.Id { - i = index - break - } - } - - route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...) 
- return route.Create() -} - -// AddRoute for the specified endpoints and SDN Route setting -func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) - - if len(endpoints) <= 0 { - return nil, errors.New("missing endpoints") - } - - route := &HostComputeRoute{ - SchemaVersion: V2SchemaVersion(), - Setting: []SDNRoutePolicySetting{ - { - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - NeedEncap: needEncapsulation, - }, - }, - } - - for _, endpoint := range endpoints { - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - } - - return route.Create() -} - -func enumerateRoutes(query string) ([]HostComputeRoute, error) { - // Enumerate all routes Guids - var ( - resultBuffer *uint16 - routeBuffer *uint16 - ) - hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { - return nil, err - } - - routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) - var routeIds []guid.GUID - if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { - return nil, err - } - - var outputRoutes []HostComputeRoute - for _, routeGUID := range routeIds { - route, err := getRoute(routeGUID, query) - if err != nil { - return nil, err - } - outputRoutes = append(outputRoutes, *route) - } - return outputRoutes, nil -} - -func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { - // Open routes. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query routes. - hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close routes. - hr = hcnCloseRoute(routeHandle) - if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeRoute - var outputRoute HostComputeRoute - if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { - return nil, err - } - return &outputRoute, nil -} - -func createRoute(settings string) (*HostComputeRoute, error) { - // Create new route. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - routeGUID := guid.GUID{} - hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query route. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close Route. 
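// A minimal sketch of the AddRoute helper defined above: publish an SDN route
// for one or more existing endpoints (AddRoute rejects an empty slice), then
// read it back by ID. The prefix and next hop are placeholders.
// Assumes: import "github.com/Microsoft/hcsshim/hcn"
func exampleAddRoute(endpoints []hcn.HostComputeEndpoint) (*hcn.HostComputeRoute, error) {
	route, err := hcn.AddRoute(endpoints, "10.1.0.0/16", "10.0.0.1", false /* needEncapsulation */)
	if err != nil {
		return nil, err
	}
	return hcn.GetRouteByID(route.ID)
}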
- hr = hcnCloseRoute(routeHandle) - if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeRoute - var outputRoute HostComputeRoute - if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { - return nil, err - } - return &outputRoute, nil -} - -func deleteRoute(routeID string) error { - routeGUID, err := guid.FromString(routeID) - if err != nil { - return errInvalidRouteID - } - var resultBuffer *uint16 - hr := hcnDeleteRoute(&routeGUID, &resultBuffer) - if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil { - return err - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go deleted file mode 100644 index 1b4c240205..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go +++ /dev/null @@ -1,150 +0,0 @@ -//go:build windows - -package hcn - -import ( - "sync" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/log" -) - -var ( - // featuresOnce handles assigning the supported features and printing the supported info to stdout only once to avoid unnecessary work - // multiple times. - featuresOnce sync.Once - featuresErr error - supportedFeatures SupportedFeatures -) - -// SupportedFeatures are the features provided by the Service. -type SupportedFeatures struct { - Acl AclFeatures `json:"ACL"` - Api ApiSupport `json:"API"` - RemoteSubnet bool `json:"RemoteSubnet"` - HostRoute bool `json:"HostRoute"` - DSR bool `json:"DSR"` - Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` - AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` - SessionAffinity bool `json:"SessionAffinity"` - IPv6DualStack bool `json:"IPv6DualStack"` - SetPolicy bool `json:"SetPolicy"` - VxlanPort bool `json:"VxlanPort"` - L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic - L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint - TierAcl bool `json:"TierAcl"` - NetworkACL bool `json:"NetworkACL"` - NestedIpSet bool `json:"NestedIpSet"` -} - -// AclFeatures are the supported ACL possibilities. -type AclFeatures struct { - AclAddressLists bool `json:"AclAddressLists"` - AclNoHostRulePriority bool `json:"AclHostRulePriority"` - AclPortRanges bool `json:"AclPortRanges"` - AclRuleId bool `json:"AclRuleId"` -} - -// ApiSupport lists the supported API versions. -type ApiSupport struct { - V1 bool `json:"V1"` - V2 bool `json:"V2"` -} - -// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called -// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the -// various hcn.IsXSupported methods need to be made. -func GetCachedSupportedFeatures() (SupportedFeatures, error) { - // Only query the HCN version and features supported once, instead of everytime this is invoked. The logs are useful to - // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant - // spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kube-proxy - // for example. 
- featuresOnce.Do(func() { - supportedFeatures, featuresErr = getSupportedFeatures() - }) - - return supportedFeatures, featuresErr -} - -// GetSupportedFeatures returns the features supported by the Service. -// -// Deprecated: Use GetCachedSupportedFeatures instead. -func GetSupportedFeatures() SupportedFeatures { - features, err := GetCachedSupportedFeatures() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.WithError(err).Errorf("unable to obtain supported features") - return features - } - return features -} - -func getSupportedFeatures() (SupportedFeatures, error) { - var features SupportedFeatures - globals, err := GetGlobals() - if err != nil { - // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. - return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") - } - features.Acl = AclFeatures{ - AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), - } - - features.Api = ApiSupport{ - V2: isFeatureSupported(globals.Version, V2ApiSupport), - V1: true, // HNSCall is still available. - } - - features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) - features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) - features.DSR = isFeatureSupported(globals.Version, DSRVersion) - features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) - features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) - features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) - features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) - features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) - features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) - features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) - features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) - features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) - features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) - features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) - - log.L.WithFields(logrus.Fields{ - "version": globals.Version, - "supportedFeatures": features, - }).Info("HCN feature check") - - return features, nil -} - -func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { - isFeatureSupported := false - - for _, versionRange := range versionsSupported { - isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) - } - - return isFeatureSupported -} - -func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { - if currentVersion.Major < versionRange.MinVersion.Major { - return false - } - if currentVersion.Major > versionRange.MaxVersion.Major { - return false - } - if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor { - return false - } - if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > versionRange.MaxVersion.Minor { - return false - } - 
return true -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/test/vendor/github.com/Microsoft/hcsshim/hcsshim.go deleted file mode 100644 index 95dc2a0255..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hcsshim.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build windows - -// Shim for the Host Compute Service (HCS) to manage Windows Server -// containers and Hyper-V containers. - -package hcsshim - -import ( - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId - -const ( - // Specific user-visible exit codes - WaitErrExecFailed = 32767 - - ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE - ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) - WSAEINVAL = syscall.Errno(10022) - - // Timeout on wait calls - TimeoutInfinite = 0xFFFFFFFF -) - -type HcsError = hcserror.HcsError diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go deleted file mode 100644 index ea71135acc..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint = hns.HNSEndpoint - -// HNSEndpointStats represent the stats for an networkendpoint in HNS -type HNSEndpointStats = hns.EndpointStats - -// Namespace represents a Compartment. -type Namespace = hns.Namespace - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse = hns.EndpointResquestResponse - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - return hns.HNSEndpointRequest(method, path, request) -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - return hns.HNSListEndpointRequest() -} - -// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container -func HotAttachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Add) -} - -// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container -func HotDetachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if !isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Remove) -} - -// ModifyContainer corresponding to the 
container id, by sending a request -func modifyContainer(id string, request *ResourceModificationRequestResponse) error { - container, err := OpenContainer(id) - if err != nil { - if IsNotExist(err) { - return ErrComputeSystemDoesNotExist - } - return getInnerError(err) - } - defer container.Close() - err = container.Modify(request) - if err != nil { - if IsNotSupported(err) { - return ErrPlatformNotSupported - } - return getInnerError(err) - } - - return nil -} - -func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { - requestMessage := &ResourceModificationRequestResponse{ - Resource: Network, - Request: request, - Data: endpointID, - } - err := modifyContainer(containerID, requestMessage) - - if err != nil { - return err - } - - return nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByID(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByName(endpointName) -} - -// GetHNSEndpointStats gets the endpoint stats by ID -func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { - return hns.GetHNSEndpointStats(endpointName) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/test/vendor/github.com/Microsoft/hcsshim/hnsglobals.go deleted file mode 100644 index c564bf4a35..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hnsglobals.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSGlobals = hns.HNSGlobals -type HNSVersion = hns.HNSVersion - -var ( - HNSVersion1803 = hns.HNSVersion1803 -) - -func GetHNSGlobals() (*HNSGlobals, error) { - return hns.GetHNSGlobals() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/test/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go deleted file mode 100644 index 925c212495..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// Subnet is associated with a network and represents a list -// of subnets available to the network -type Subnet = hns.Subnet - -// MacPool is associated with a network and represents a list -// of macaddresses available to the network -type MacPool = hns.MacPool - -// HNSNetwork represents a network in HNS -type HNSNetwork = hns.HNSNetwork - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - return hns.HNSNetworkRequest(method, path, request) -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - return hns.HNSListNetworkRequest(method, path, request) -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByID(networkID) -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByName(networkName) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/test/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go deleted file mode 100644 index 9bfe61ee83..0000000000 --- 
a/test/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy = hns.RoutePolicy - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy = hns.ELBPolicy - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy = hns.LBPolicy - -// PolicyList is a structure defining schema for Policy list request -type PolicyList = hns.PolicyList - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.HNSPolicyListRequest(method, path, request) -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - return hns.HNSListPolicyListRequest() -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.PolicyListRequest(method, path, request) -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return hns.GetPolicyListByID(policyListID) -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { - return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/test/vendor/github.com/Microsoft/hcsshim/hnssupport.go deleted file mode 100644 index d97681e0ca..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/hnssupport.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSSupportedFeatures = hns.HNSSupportedFeatures - -type HNSAclFeatures = hns.HNSAclFeatures - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - return hns.GetHNSSupportedFeatures() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/interface.go b/test/vendor/github.com/Microsoft/hcsshim/interface.go deleted file mode 100644 index 81a2819516..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/interface.go +++ /dev/null @@ -1,116 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "io" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs/schema1" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig = schema1.ProcessConfig - -type Layer = schema1.Layer -type MappedDir = schema1.MappedDir -type MappedPipe = schema1.MappedPipe -type HvRuntime = schema1.HvRuntime -type MappedVirtualDisk = schema1.MappedVirtualDisk - -// AssignedDevice represents a device that has been directly assigned to a container -// -// NOTE: Support added in RS5 -type AssignedDevice = schema1.AssignedDevice - -// ContainerConfig is used as both the input of 
CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig = schema1.ContainerConfig - -type ComputeSystemQuery = schema1.ComputeSystemQuery - -// Container represents a created (but not necessarily running) container. -type Container interface { - // Start synchronously starts the container. - Start() error - - // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. - Shutdown() error - - // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. - Terminate() error - - // Waits synchronously waits for the container to shutdown or terminate. - Wait() error - - // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It - // returns false if timeout occurs. - WaitTimeout(time.Duration) error - - // Pause pauses the execution of a container. - Pause() error - - // Resume resumes the execution of a container. - Resume() error - - // HasPendingUpdates returns true if the container has updates pending to install. - HasPendingUpdates() (bool, error) - - // Statistics returns statistics for a container. - Statistics() (Statistics, error) - - // ProcessList returns details for the processes in a container. - ProcessList() ([]ProcessListItem, error) - - // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller - MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) - - // CreateProcess launches a new process within the container. - CreateProcess(c *ProcessConfig) (Process, error) - - // OpenProcess gets an interface to an existing process within the container. - OpenProcess(pid int) (Process, error) - - // Close cleans up any state associated with the container but does not terminate or wait for it. - Close() error - - // Modify the System - Modify(config *ResourceModificationRequestResponse) error -} - -// Process represents a running or exited process. -type Process interface { - // Pid returns the process ID of the process within the container. - Pid() int - - // Kill signals the process to terminate but does not wait for it to finish terminating. - Kill() error - - // Wait waits for the process to exit. - Wait() error - - // WaitTimeout waits for the process to exit or the duration to elapse. It returns - // false if timeout occurs. - WaitTimeout(time.Duration) error - - // ExitCode returns the exit code of the process. The process must have - // already terminated. - ExitCode() (int, error) - - // ResizeConsole resizes the console of the process. - ResizeConsole(width, height uint16) error - - // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing - // these pipes does not close the underlying pipes; it should be possible to - // call this multiple times to get multiple interfaces. - Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) - - // CloseStdin closes the write side of the stdin pipe so that the process is - // notified on the read side that there is no more data in stdin. - CloseStdin() error - - // Close cleans up any state associated with the process but does not kill - // or wait on it. 
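To make the shape of these interfaces concrete, here is a rough sketch of the documented lifecycle: open a container, launch a process, wait on it, and read its exit code. The container ID and command line are placeholders, the container is assumed to already exist in the HCS, and OpenContainer is used as it appears in modifyContainer above; the single ProcessConfig field set here is illustrative, real callers normally also wire up stdio.

package hcsshimexample

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func runInContainer() error {
	// Open an existing compute system by ID ("example" is a placeholder).
	container, err := hcsshim.OpenContainer("example")
	if err != nil {
		return err
	}
	defer container.Close()

	// Launch a process inside the container. Only CommandLine is set in
	// this sketch; stdio pipe creation is omitted.
	process, err := container.CreateProcess(&hcsshim.ProcessConfig{
		CommandLine: `cmd /c echo hello`,
	})
	if err != nil {
		return err
	}
	defer process.Close()

	// Wait for the process to exit, then read its exit code.
	if err := process.Wait(); err != nil {
		return err
	}
	code, err := process.ExitCode()
	if err != nil {
		return err
	}
	fmt.Println("process exited with", code)
	return nil
}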
- Close() error -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/clone/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/clone/doc.go deleted file mode 100644 index c65f2e337e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/clone/doc.go +++ /dev/null @@ -1 +0,0 @@ -package clone diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/clone/registry.go b/test/vendor/github.com/Microsoft/hcsshim/internal/clone/registry.go deleted file mode 100644 index 1727d57afb..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/clone/registry.go +++ /dev/null @@ -1,170 +0,0 @@ -//go:build windows - -package clone - -import ( - "bytes" - "context" - "encoding/gob" - "fmt" - - "github.com/Microsoft/hcsshim/internal/regstate" - "github.com/Microsoft/hcsshim/internal/uvm" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - configRoot = "LateClone" - configKey = "UVMConfig" - templateConfigCurrentSerialVersionID = 1 -) - -// TemplateConfig struct maintains all of the information about a template. This includes -// the information for both the template container and the template UVM. This struct is -// serialized and stored in the registry and hence is version controlled. -// Note: Update the `templateConfigCurrentSerialVersionID` when this structure definition -// is changed. -type TemplateConfig struct { - SerialVersionID uint32 - TemplateUVMID string - TemplateUVMResources []uvm.Cloneable - TemplateUVMCreateOpts uvm.OptionsWCOW - TemplateContainerID string - // Below we store the container spec for the template container so that when - // cloning containers we can verify that a different spec is not provided for the - // cloned container. - TemplateContainerSpec specs.Spec -} - -// When encoding interfaces gob requires us to register the struct types that we will be -// using under those interfaces. This registration needs to happen on both sides i.e the -// side which encodes the data (i.e the shim process of the template) and the side which -// decodes the data (i.e the shim process of the clone). -// Go init function: https://golang.org/doc/effective_go.html#init -func init() { - // Register the pointer to structs because that is what is being stored. - gob.Register(&uvm.VSMBShare{}) - gob.Register(&uvm.SCSIMount{}) -} - -func encodeTemplateConfig(templateConfig *TemplateConfig) ([]byte, error) { - var buf bytes.Buffer - - encoder := gob.NewEncoder(&buf) - if err := encoder.Encode(templateConfig); err != nil { - return nil, fmt.Errorf("error while encoding template config: %s", err) - } - return buf.Bytes(), nil -} - -func decodeTemplateConfig(encodedBytes []byte) (*TemplateConfig, error) { - var templateConfig TemplateConfig - - reader := bytes.NewReader(encodedBytes) - decoder := gob.NewDecoder(reader) - if err := decoder.Decode(&templateConfig); err != nil { - return nil, fmt.Errorf("error while decoding template config: %s", err) - } - return &templateConfig, nil -} - -// loadPersistedUVMConfig loads a persisted config from the registry that matches the given ID -// If not found returns `regstate.NotFoundError` -func loadPersistedUVMConfig(id string) ([]byte, error) { - sk, err := regstate.Open(configRoot, false) - if err != nil { - return nil, err - } - defer sk.Close() - - var encodedConfig []byte - if err := sk.Get(id, configKey, &encodedConfig); err != nil { - return nil, err - } - return encodedConfig, nil -} - -// storePersistedUVMConfig stores the given config to the registry. 
-// If the store fails returns the store error. -func storePersistedUVMConfig(id string, encodedConfig []byte) error { - sk, err := regstate.Open(configRoot, false) - if err != nil { - return err - } - defer sk.Close() - - if err := sk.Create(id, configKey, encodedConfig); err != nil { - return err - } - return nil -} - -// removePersistedUVMConfig removes any persisted state associated with this config. If the config -// is not found in the registry `Remove` returns no error. -func removePersistedUVMConfig(id string) error { - sk, err := regstate.Open(configRoot, false) - if err != nil { - if regstate.IsNotFoundError(err) { - return nil - } - return err - } - defer sk.Close() - - if err := sk.Remove(id); err != nil { - if regstate.IsNotFoundError(err) { - return nil - } - return err - } - return nil -} - -// Saves all the information required to create a clone from the template -// of this container into the registry. -func SaveTemplateConfig(ctx context.Context, templateConfig *TemplateConfig) error { - _, err := loadPersistedUVMConfig(templateConfig.TemplateUVMID) - if !regstate.IsNotFoundError(err) { - return fmt.Errorf("parent VM(ID: %s) config shouldn't exit in registry (%s)", templateConfig.TemplateUVMID, err) - } - - // set the serial version before encoding - templateConfig.SerialVersionID = templateConfigCurrentSerialVersionID - - encodedBytes, err := encodeTemplateConfig(templateConfig) - if err != nil { - return fmt.Errorf("failed to encode template config: %s", err) - } - - if err := storePersistedUVMConfig(templateConfig.TemplateUVMID, encodedBytes); err != nil { - return fmt.Errorf("failed to store encoded template config: %s", err) - } - - return nil -} - -// Removes all the state associated with the template with given ID -// If there is no state associated with this ID then the function simply returns without -// doing anything. -func RemoveSavedTemplateConfig(id string) error { - return removePersistedUVMConfig(id) -} - -// Retrieves the UVMTemplateConfig for the template with given ID from the registry. 
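As a rough illustration of the round trip these helpers provide, the sketch below saves a template's config under its UVM ID, fetches it back, and finally removes it. The IDs are placeholders and the remaining TemplateConfig fields are left at their zero values.

package clonedemo

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/clone"
)

// saveAndFetch round-trips a template config through the registry.
// "template-uvm-id" and "template-container-id" are placeholders.
func saveAndFetch(ctx context.Context) error {
	cfg := &clone.TemplateConfig{
		TemplateUVMID:       "template-uvm-id",
		TemplateContainerID: "template-container-id",
		// TemplateUVMResources, TemplateUVMCreateOpts and
		// TemplateContainerSpec are left at their zero values here.
	}

	// SerialVersionID is set by SaveTemplateConfig before encoding.
	if err := clone.SaveTemplateConfig(ctx, cfg); err != nil {
		return err
	}

	fetched, err := clone.FetchTemplateConfig(ctx, cfg.TemplateUVMID)
	if err != nil {
		return err
	}
	fmt.Println("fetched template for container", fetched.TemplateContainerID)

	// Clean up the persisted state once the template is no longer needed.
	return clone.RemoveSavedTemplateConfig(cfg.TemplateUVMID)
}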
-func FetchTemplateConfig(ctx context.Context, id string) (*TemplateConfig, error) { - encodedBytes, err := loadPersistedUVMConfig(id) - if err != nil { - return nil, fmt.Errorf("failed to fetch encoded template config: %s", err) - } - - templateConfig, err := decodeTemplateConfig(encodedBytes) - if err != nil { - return nil, fmt.Errorf("failed to decode template config: %s", err) - } - - if templateConfig.SerialVersionID != templateConfigCurrentSerialVersionID { - return nil, fmt.Errorf("serialized version of TemplateConfig: %d doesn't match with the current version: %d", templateConfig.SerialVersionID, templateConfigCurrentSerialVersionID) - } - - return templateConfig, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/cmd.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/cmd.go deleted file mode 100644 index d7228619eb..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/cmd.go +++ /dev/null @@ -1,321 +0,0 @@ -//go:build windows - -package cmd - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - "sync/atomic" - "time" - - "github.com/Microsoft/hcsshim/internal/cow" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" - "golang.org/x/sys/windows" -) - -// CmdProcessRequest stores information on command requests made through this package. -type CmdProcessRequest struct { - Args []string - Workdir string - Terminal bool - Stdin string - Stdout string - Stderr string -} - -// Cmd represents a command being prepared or run in a process host. -type Cmd struct { - // Host is the process host in which to launch the process. - Host cow.ProcessHost - - // The OCI spec for the process. - Spec *specs.Process - - // Standard IO streams to relay to/from the process. - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - // Log provides a logrus entry to use in logging IO copying status. - Log *logrus.Entry - - // Context provides a context that terminates the process when it is done. - Context context.Context - - // CopyAfterExitTimeout is the amount of time after process exit we allow the - // stdout, stderr relays to continue before forcibly closing them if not - // already completed. This is primarily a safety step against the HCS when - // it fails to send a close on the stdout, stderr pipes when the process - // exits and blocks the relay wait groups forever. - CopyAfterExitTimeout time.Duration - - // Process is filled out after Start() returns. - Process cow.Process - - // ExitState is filled out after Wait() (or Run() or Output()) completes. - ExitState *ExitState - - iogrp errgroup.Group - stdinErr atomic.Value - allDoneCh chan struct{} -} - -// ExitState contains whether a process has exited and with which exit code. -type ExitState struct { - exited bool - code int -} - -// ExitCode returns the exit code of the process, or -1 if the exit code is not known. -func (s *ExitState) ExitCode() int { - if !s.exited { - return -1 - } - return s.code -} - -// ExitError is used when a process exits with a non-zero exit code. 
-type ExitError struct { - *ExitState -} - -func (err *ExitError) Error() string { - return fmt.Sprintf("process exited with exit code %d", err.ExitCode()) -} - -// Additional fields to hcsschema.ProcessParameters used by LCOW -type lcowProcessParameters struct { - hcsschema.ProcessParameters - OCIProcess *specs.Process `json:"OciProcess,omitempty"` -} - -// escapeArgs makes a Windows-style escaped command line from a set of arguments -func escapeArgs(args []string) string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = windows.EscapeArg(a) - } - return strings.Join(escapedArgs, " ") -} - -// Command makes a Cmd for a given command and arguments. -func Command(host cow.ProcessHost, name string, arg ...string) *Cmd { - cmd := &Cmd{ - Host: host, - Spec: &specs.Process{ - Args: append([]string{name}, arg...), - }, - Log: log.L.Dup(), - ExitState: &ExitState{}, - } - if host.OS() == "windows" { - cmd.Spec.Cwd = `C:\` - } else { - cmd.Spec.Cwd = "/" - cmd.Spec.Env = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} - } - return cmd -} - -// CommandContext makes a Cmd for a given command and arguments. After -// it is launched, the process is killed when the context becomes done. -func CommandContext(ctx context.Context, host cow.ProcessHost, name string, arg ...string) *Cmd { - cmd := Command(host, name, arg...) - cmd.Context = ctx - cmd.Log = log.G(ctx) - return cmd -} - -// Start starts a command. The caller must ensure that if Start succeeds, -// Wait is eventually called to clean up resources. -func (c *Cmd) Start() error { - c.allDoneCh = make(chan struct{}) - var x interface{} - if !c.Host.IsOCI() { - wpp := &hcsschema.ProcessParameters{ - CommandLine: c.Spec.CommandLine, - User: c.Spec.User.Username, - WorkingDirectory: c.Spec.Cwd, - EmulateConsole: c.Spec.Terminal, - CreateStdInPipe: c.Stdin != nil, - CreateStdOutPipe: c.Stdout != nil, - CreateStdErrPipe: c.Stderr != nil, - } - - if c.Spec.CommandLine == "" { - if c.Host.OS() == "windows" { - wpp.CommandLine = escapeArgs(c.Spec.Args) - } else { - wpp.CommandArgs = c.Spec.Args - } - } - - environment := make(map[string]string) - for _, v := range c.Spec.Env { - s := strings.SplitN(v, "=", 2) - if len(s) == 2 && len(s[1]) > 0 { - environment[s[0]] = s[1] - } - } - wpp.Environment = environment - - if c.Spec.ConsoleSize != nil { - wpp.ConsoleSize = []int32{ - int32(c.Spec.ConsoleSize.Height), - int32(c.Spec.ConsoleSize.Width), - } - } - x = wpp - } else { - lpp := &lcowProcessParameters{ - ProcessParameters: hcsschema.ProcessParameters{ - CreateStdInPipe: c.Stdin != nil, - CreateStdOutPipe: c.Stdout != nil, - CreateStdErrPipe: c.Stderr != nil, - }, - OCIProcess: c.Spec, - } - x = lpp - } - if c.Context != nil && c.Context.Err() != nil { - return c.Context.Err() - } - p, err := c.Host.CreateProcess(context.TODO(), x) - if err != nil { - return err - } - c.Process = p - if c.Log != nil { - c.Log = c.Log.WithField("pid", p.Pid()) - } - - // Start relaying process IO. - stdin, stdout, stderr := p.Stdio() - if c.Stdin != nil { - // Do not make stdin part of the error group because there is no way for - // us or the caller to reliably unblock the c.Stdin read when the - // process exits. - go func() { - _, err := relayIO(stdin, c.Stdin, c.Log, "stdin") - // Report the stdin copy error. If the process has exited, then the - // caller may never see it, but if the error was due to a failure in - // stdin read, then it is likely the process is still running. 
- if err != nil { - c.stdinErr.Store(err) - } - // Notify the process that there is no more input. - if err := p.CloseStdin(context.TODO()); err != nil && c.Log != nil { - c.Log.WithError(err).Warn("failed to close Cmd stdin") - } - }() - } - - if c.Stdout != nil { - c.iogrp.Go(func() error { - _, err := relayIO(c.Stdout, stdout, c.Log, "stdout") - if err := p.CloseStdout(context.TODO()); err != nil { - c.Log.WithError(err).Warn("failed to close Cmd stdout") - } - return err - }) - } - - if c.Stderr != nil { - c.iogrp.Go(func() error { - _, err := relayIO(c.Stderr, stderr, c.Log, "stderr") - if err := p.CloseStderr(context.TODO()); err != nil { - c.Log.WithError(err).Warn("failed to close Cmd stderr") - } - return err - }) - } - - if c.Context != nil { - go func() { - select { - case <-c.Context.Done(): - // Process.Kill (via Process.Signal) will not send an RPC if the - // provided context in is cancelled (bridge.AsyncRPC will end early) - ctx := c.Context - if ctx == nil { - ctx = context.Background() - } - kctx := log.Copy(context.Background(), ctx) - _, _ = c.Process.Kill(kctx) - case <-c.allDoneCh: - } - }() - } - return nil -} - -// Wait waits for a command and its IO to complete and closes the underlying -// process. It can only be called once. It returns an ExitError if the command -// runs and returns a non-zero exit code. -func (c *Cmd) Wait() error { - waitErr := c.Process.Wait() - if waitErr != nil && c.Log != nil { - c.Log.WithError(waitErr).Warn("process wait failed") - } - state := &ExitState{} - code, exitErr := c.Process.ExitCode() - if exitErr == nil { - state.exited = true - state.code = code - } - // Terminate the IO if the copy does not complete in the requested time. - if c.CopyAfterExitTimeout != 0 { - go func() { - t := time.NewTimer(c.CopyAfterExitTimeout) - defer t.Stop() - select { - case <-c.allDoneCh: - case <-t.C: - // Close the process to cancel any reads to stdout or stderr. - c.Process.Close() - if c.Log != nil { - c.Log.Warn("timed out waiting for stdio relay") - } - } - }() - } - ioErr := c.iogrp.Wait() - if ioErr == nil { - ioErr, _ = c.stdinErr.Load().(error) - } - close(c.allDoneCh) - c.Process.Close() - c.ExitState = state - if exitErr != nil { - return exitErr - } - if state.exited && state.code != 0 { - return &ExitError{state} - } - return ioErr -} - -// Run is equivalent to Start followed by Wait. -func (c *Cmd) Run() error { - err := c.Start() - if err != nil { - return err - } - return c.Wait() -} - -// Output runs a command via Run and collects its stdout into a buffer, -// which it returns. -func (c *Cmd) Output() ([]byte, error) { - var b bytes.Buffer - c.Stdout = &b - err := c.Run() - return b.Bytes(), err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/diag.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/diag.go deleted file mode 100644 index e397bb85ee..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/diag.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build windows - -package cmd - -import ( - "context" - "errors" - "os/exec" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/uvm" -) - -// ExecInUvm is a helper function used to execute commands specified in `req` inside the given UVM. 
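For context on how this package is typically driven, the sketch below runs a single command in a utility VM and collects its stdout with Output, similar to what ExecInUvm does below but without the named-pipe plumbing. The *uvm.UtilityVM is assumed to have been created elsewhere, and the command itself is arbitrary.

package cmddemo

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/cmd"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// listRoot runs `ls /` inside an already-running LCOW utility VM and
// returns its stdout. The UtilityVM satisfies cow.ProcessHost, which is
// all that Command/CommandContext require.
func listRoot(ctx context.Context, vm *uvm.UtilityVM) (string, error) {
	c := cmd.CommandContext(ctx, vm, "ls", "/")
	out, err := c.Output()
	if err != nil {
		// A non-zero exit surfaces as *cmd.ExitError.
		if exitErr, ok := err.(*cmd.ExitError); ok {
			return "", fmt.Errorf("ls exited with code %d", exitErr.ExitCode())
		}
		return "", err
	}
	return string(out), nil
}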
-func ExecInUvm(ctx context.Context, vm *uvm.UtilityVM, req *CmdProcessRequest) (int, error) { - if len(req.Args) == 0 { - return 0, errors.New("missing command") - } - np, err := NewNpipeIO(ctx, req.Stdin, req.Stdout, req.Stderr, req.Terminal, 0) - if err != nil { - return 0, err - } - defer np.Close(ctx) - cmd := CommandContext(ctx, vm, req.Args[0], req.Args[1:]...) - if req.Workdir != "" { - cmd.Spec.Cwd = req.Workdir - } - if vm.OS() == "windows" { - cmd.Spec.User.Username = `NT AUTHORITY\SYSTEM` - } - cmd.Spec.Terminal = req.Terminal - cmd.Stdin = np.Stdin() - cmd.Stdout = np.Stdout() - cmd.Stderr = np.Stderr() - cmd.Log = log.G(ctx).WithField(logfields.UVMID, vm.ID()) - err = cmd.Run() - return cmd.ExitState.ExitCode(), err -} - -// ExecInShimHost is a helper function used to execute commands specified in `req` in the shim's -// hosting system. -func ExecInShimHost(ctx context.Context, req *CmdProcessRequest) (int, error) { - if len(req.Args) == 0 { - return 0, errors.New("missing command") - } - cmdArgsWithoutName := []string{""} - if len(req.Args) > 1 { - cmdArgsWithoutName = req.Args[1:] - } - np, err := NewNpipeIO(ctx, req.Stdin, req.Stdout, req.Stderr, req.Terminal, 0) - if err != nil { - return 0, err - } - defer np.Close(ctx) - cmd := exec.Command(req.Args[0], cmdArgsWithoutName...) - cmd.Stdin = np.Stdin() - cmd.Stdout = np.Stdout() - cmd.Stderr = np.Stderr() - err = cmd.Run() - if err != nil { - if exiterr, ok := err.(*exec.ExitError); ok { - return exiterr.ExitCode(), exiterr - } - return -1, err - } - return 0, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/doc.go deleted file mode 100644 index 7fe443fc92..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package cmd provides functionality used to execute commands inside of containers -// or UVMs, and to connect an upstream client to those commands for handling in/out/err IO. -package cmd diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io.go deleted file mode 100644 index 75ddd1f355..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build windows - -package cmd - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// UpstreamIO is an interface describing the IO to connect to above the shim. -// Depending on the callers settings there may be no opened IO. -type UpstreamIO interface { - // Close closes all open io. - // - // This call is idempotent and safe to call multiple times. - Close(ctx context.Context) - // CloseStdin closes just `Stdin()` if open. - // - // This call is idempotent and safe to call multiple times. - CloseStdin(ctx context.Context) - // Stdin returns the open `stdin` reader. If `stdin` was never opened this - // will return `nil`. - Stdin() io.Reader - // StdinPath returns the original path used to open the `Stdin()` reader. - StdinPath() string - // Stdout returns the open `stdout` writer. If `stdout` was never opened - // this will return `nil`. - Stdout() io.Writer - // StdoutPath returns the original path used to open the `Stdout()` writer. - StdoutPath() string - // Stderr returns the open `stderr` writer. If `stderr` was never opened - // this will return `nil`. 
- Stderr() io.Writer - // StderrPath returns the original path used to open the `Stderr()` writer. - StderrPath() string - // Terminal returns `true` if the connection is emulating a terminal. If - // `true` `Stderr()` will always return `nil` and `StderrPath()` will always - // return `""`. - Terminal() bool -} - -// NewUpstreamIO returns an UpstreamIO instance. Currently we only support named pipes and binary -// logging driver for container IO. When using binary logger `stdout` and `stderr` are assumed to be -// the same and the value of `stderr` is completely ignored. -func NewUpstreamIO(ctx context.Context, id, stdout, stderr, stdin string, terminal bool, ioRetryTimeout time.Duration) (UpstreamIO, error) { - u, err := url.Parse(stdout) - - // Create IO with named pipes. - if err != nil || u.Scheme == "" { - return NewNpipeIO(ctx, stdin, stdout, stderr, terminal, ioRetryTimeout) - } - - // Create IO for binary logging driver. - if u.Scheme != "binary" { - return nil, errors.Errorf("scheme must be 'binary', got: '%s'", u.Scheme) - } - - return NewBinaryIO(ctx, id, u) -} - -// relayIO is a glorified io.Copy that also logs when the copy has completed. -func relayIO(w io.Writer, r io.Reader, log *logrus.Entry, name string) (int64, error) { - n, err := io.Copy(w, r) - if log != nil { - lvl := logrus.DebugLevel - log = log.WithFields(logrus.Fields{ - "file": name, - "bytes": n, - }) - if err != nil { - lvl = logrus.ErrorLevel - log = log.WithError(err) - } - log.Log(lvl, "Cmd IO relay complete") - } - return n, err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_binary.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_binary.go deleted file mode 100644 index 989a53c93c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_binary.go +++ /dev/null @@ -1,290 +0,0 @@ -//go:build windows - -package cmd - -import ( - "context" - "fmt" - "io" - "net" - "net/url" - "os/exec" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/Microsoft/go-winio" - "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/log" -) - -const ( - binaryPipeFmt = `\\.\pipe\binary-%s-%s` - binaryCmdWaitTimeout = 10 * time.Second - binaryCmdStartTimeout = 10 * time.Second -) - -// NewBinaryIO runs a custom binary process for pluggable shim logging driver. -// -// Container's IO will be redirected to the logging driver via named pipes, which are -// passed as "CONTAINER_STDOUT", "CONTAINER_STDERR" environment variables. The logging -// driver MUST dial a wait pipe passed via "CONTAINER_WAIT" environment variable AND CLOSE -// it to indicate that it's ready to consume the IO. For customer's convenience container ID -// and namespace are also passed via "CONTAINER_ID" and "CONTAINER_NAMESPACE". -// -// The path to the logging driver can be provided via a URL's host/path. 
Additional arguments -// can be passed to the logger via URL query params -func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ UpstreamIO, err error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - ns = namespaces.Default - } - - var stdoutPipe, stderrPipe, waitPipe io.ReadWriteCloser - - stdoutPipePath := fmt.Sprintf(binaryPipeFmt, id, "stdout") - stdoutPipe, err = openNPipe(stdoutPipePath) - if err != nil { - return nil, err - } - - stderrPipePath := fmt.Sprintf(binaryPipeFmt, id, "stderr") - stderrPipe, err = openNPipe(stderrPipePath) - if err != nil { - return nil, err - } - - waitPipePath := fmt.Sprintf(binaryPipeFmt, id, "wait") - waitPipe, err = openNPipe(waitPipePath) - if err != nil { - return nil, err - } - defer func() { - if err := waitPipe.Close(); err != nil { - log.G(ctx).WithError(err).Errorf("error closing wait pipe: %s", waitPipePath) - } - }() - - envs := []string{ - "CONTAINER_ID=" + id, - "CONTAINER_NAMESPACE=" + ns, - "CONTAINER_STDOUT=" + stdoutPipePath, - "CONTAINER_STDERR=" + stderrPipePath, - "CONTAINER_WAIT=" + waitPipePath, - } - cmd, err := newBinaryCmd(ctx, uri, envs) - if err != nil { - return nil, err - } - - if err := cmd.Start(); err != nil { - return nil, err - } - - errCh := make(chan error, 1) - // Wait for logging driver to signal to the wait pipe that it's ready to consume IO - go func() { - b := make([]byte, 1) - if _, err := waitPipe.Read(b); err != nil && err != io.EOF { - errCh <- err - return - } - errCh <- nil - }() - - select { - case err = <-errCh: - if err != nil { - return nil, errors.Wrap(err, "failed to start binary logger") - } - case <-time.After(binaryCmdStartTimeout): - return nil, errors.New("failed to start binary logger: timeout") - } - - log.G(ctx).WithFields(logrus.Fields{ - "containerID": id, - "containerNamespace": ns, - "binaryCmd": cmd.String(), - "binaryProcessID": cmd.Process.Pid, - }).Debug("binary io process started") - - return &binaryIO{ - cmd: cmd, - stdout: stdoutPipePath, - sout: stdoutPipe, - stderr: stderrPipePath, - serr: stderrPipe, - }, nil -} - -// sanitizePath parses the URL object and returns a clean path to the logging driver -func sanitizePath(uri *url.URL) string { - path := filepath.Clean(uri.Path) - - if strings.Contains(path, `:\`) { - return strings.TrimPrefix(path, "\\") - } - - return path -} - -func newBinaryCmd(ctx context.Context, uri *url.URL, envs []string) (*exec.Cmd, error) { - if uri.Path == "" { - return nil, errors.New("no logging driver path provided") - } - - var args []string - for k, vs := range uri.Query() { - args = append(args, k) - if len(vs) > 0 && vs[0] != "" { - args = append(args, vs[0]) - } - } - - execPath := sanitizePath(uri) - - cmd := exec.CommandContext(ctx, execPath, args...) - cmd.Env = append(cmd.Env, envs...) - - return cmd, nil -} - -var _ UpstreamIO = &binaryIO{} - -// Implements UpstreamIO interface to enable shim pluggable logging -type binaryIO struct { - cmd *exec.Cmd - - binaryCloser sync.Once - - stdout, stderr string - - sout, serr io.ReadWriteCloser - soutCloser sync.Once -} - -// Close named pipes for container stdout and stderr and wait for the binary process to finish. 
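To make the contract described above concrete, here is a minimal sketch of the other side: a logging binary that picks up the pipe paths from the documented CONTAINER_* environment variables, closes the wait pipe to signal readiness, and then drains stdout/stderr until EOF. Forwarding to the logger's own stdout is an arbitrary choice for the sketch.

package main

import (
	"io"
	"os"
	"sync"

	"github.com/Microsoft/go-winio"
)

// A minimal pluggable logger honoring the CONTAINER_* contract: dial and
// close CONTAINER_WAIT to signal readiness, then consume the stdout and
// stderr pipes until they are closed.
func main() {
	stdoutPipe := os.Getenv("CONTAINER_STDOUT")
	stderrPipe := os.Getenv("CONTAINER_STDERR")
	waitPipe := os.Getenv("CONTAINER_WAIT")

	// Signal readiness by dialing the wait pipe and closing it.
	w, err := winio.DialPipe(waitPipe, nil)
	if err != nil {
		os.Exit(1)
	}
	w.Close()

	var wg sync.WaitGroup
	for _, p := range []string{stdoutPipe, stderrPipe} {
		c, err := winio.DialPipe(p, nil)
		if err != nil {
			continue
		}
		wg.Add(1)
		go func(r io.ReadCloser) {
			defer wg.Done()
			defer r.Close()
			// Forward container output to this logger's own stdout.
			_, _ = io.Copy(os.Stdout, r)
		}(c)
	}
	wg.Wait()
}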
-func (b *binaryIO) Close(ctx context.Context) { - b.soutCloser.Do(func() { - if b.sout != nil { - err := b.sout.Close() - if err != nil { - log.G(ctx).WithError(err).Errorf("error while closing stdout npipe") - } - } - if b.serr != nil { - err := b.serr.Close() - if err != nil { - log.G(ctx).WithError(err).Errorf("error while closing stderr npipe") - } - } - }) - b.binaryCloser.Do(func() { - done := make(chan error, 1) - go func() { - done <- b.cmd.Wait() - }() - - select { - case err := <-done: - if err != nil { - log.G(ctx).WithError(err).Errorf("error while waiting for binary cmd to finish") - } - case <-time.After(binaryCmdWaitTimeout): - log.G(ctx).Errorf("timeout while waiting for binaryIO process to finish. Killing") - err := b.cmd.Process.Kill() - if err != nil { - log.G(ctx).WithError(err).Errorf("error while killing binaryIO process") - } - } - }) -} - -func (b *binaryIO) CloseStdin(_ context.Context) {} - -func (b *binaryIO) Stdin() io.Reader { - return nil -} - -func (b *binaryIO) StdinPath() string { - return "" -} - -func (b *binaryIO) Stdout() io.Writer { - return b.sout -} - -func (b *binaryIO) StdoutPath() string { - return b.stdout -} - -func (b *binaryIO) Stderr() io.Writer { - return b.serr -} - -func (b *binaryIO) StderrPath() string { - return b.stderr -} - -func (b *binaryIO) Terminal() bool { - return false -} - -type pipe struct { - l net.Listener - con net.Conn - conErr error - conWg sync.WaitGroup -} - -func openNPipe(path string) (io.ReadWriteCloser, error) { - l, err := winio.ListenPipe(path, nil) - if err != nil { - return nil, err - } - - p := &pipe{l: l} - p.conWg.Add(1) - - go func() { - defer p.conWg.Done() - c, err := l.Accept() - if err != nil { - p.conErr = err - return - } - p.con = c - }() - return p, nil -} - -func (p *pipe) Write(b []byte) (int, error) { - p.conWg.Wait() - if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") - } - return p.con.Write(b) -} - -func (p *pipe) Read(b []byte) (int, error) { - p.conWg.Wait() - if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") - } - return p.con.Read(b) -} - -func (p *pipe) Close() error { - if err := p.l.Close(); err != nil { - log.G(context.TODO()).WithError(err).Debug("error closing pipe listener") - } - p.conWg.Wait() - if p.con != nil { - return p.con.Close() - } - return p.conErr -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_npipe.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_npipe.go deleted file mode 100644 index 614f34ca29..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cmd/io_npipe.go +++ /dev/null @@ -1,296 +0,0 @@ -//go:build windows - -package cmd - -import ( - "context" - "fmt" - "io" - "math/rand" - "net" - "sync" - "syscall" - "time" - - winio "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/cenkalti/backoff/v4" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -func init() { - // Need to seed for the rng in backoff.NextBackoff() - rand.Seed(time.Now().UnixNano()) -} - -// NewNpipeIO creates connected upstream io. It is the callers responsibility to validate that `if terminal == true`, `stderr == ""`. retryTimeout -// refers to the timeout used to try and reconnect to the server end of the named pipe if the connection is severed. A value of 0 for retryTimeout -// is treated as an infinite timeout. 
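For orientation, the sketch below sets up the upstream side that NewNpipeIO dials: it listens on a fresh named pipe with winio.ListenPipe, relays whatever the process writes to that pipe, and then opens the IO via NewNpipeIO with only stdout connected. The pipe path is a placeholder and the relay target (this process' stdout) is arbitrary.

package npipedemo

import (
	"context"
	"io"
	"os"

	"github.com/Microsoft/go-winio"

	"github.com/Microsoft/hcsshim/internal/cmd"
)

// upstreamStdout listens on a named pipe, has NewNpipeIO dial it as the
// process' stdout, and copies whatever arrives to this process' stdout.
// A retryTimeout of 0 means reconnect attempts never time out.
func upstreamStdout(ctx context.Context) (cmd.UpstreamIO, error) {
	const pipe = `\\.\pipe\demo-stdout`

	l, err := winio.ListenPipe(pipe, nil)
	if err != nil {
		return nil, err
	}

	// Accept the connection NewNpipeIO is about to make and relay it.
	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		_, _ = io.Copy(os.Stdout, c)
	}()

	// stdin and stderr are left empty, so only stdout is opened.
	return cmd.NewNpipeIO(ctx, "", pipe, "", false, 0)
}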
-func NewNpipeIO(ctx context.Context, stdin, stdout, stderr string, terminal bool, retryTimeout time.Duration) (_ UpstreamIO, err error) { - log.G(ctx).WithFields(logrus.Fields{ - "stdin": stdin, - "stdout": stdout, - "stderr": stderr, - "terminal": terminal, - }).Debug("NewNpipeIO") - - nio := &npipeio{ - stdin: stdin, - stdout: stdout, - stderr: stderr, - terminal: terminal, - } - defer func() { - if err != nil { - nio.Close(ctx) - } - }() - - if stdin != "" { - c, err := winio.DialPipeContext(ctx, stdin) - if err != nil { - return nil, err - } - // We don't have any retry logic for stdin as there's no good way to detect that we'd even need to retry. If the process forwarding - // stdin to the container (some client interface to exec a process in a container) exited, we'll get EOF which io.Copy treats as - // success. For fifos on Linux it seems if all fd's for the write end of the pipe disappear, which is the same scenario, then - // the read end will get EOF as well. - nio.sin = c - } - if stdout != "" { - c, err := winio.DialPipeContext(ctx, stdout) - if err != nil { - return nil, err - } - nio.sout = &nPipeRetryWriter{ctx, c, stdout, newBackOff(retryTimeout)} - } - if stderr != "" { - c, err := winio.DialPipeContext(ctx, stderr) - if err != nil { - return nil, err - } - nio.serr = &nPipeRetryWriter{ctx, c, stderr, newBackOff(retryTimeout)} - } - return nio, nil -} - -// nPipeRetryWriter is an io.Writer that wraps a net.Conn representing a named pipe connection. The retry logic is specifically only for -// disconnect scenarios (pipe broken, server went away etc.) to attempt to re-establish a connection, and is not for retrying writes on a busy pipe. -type nPipeRetryWriter struct { - ctx context.Context - net.Conn - pipePath string - backOff backoff.BackOff -} - -// newBackOff returns a new BackOff interface. The values chosen are fairly conservative, the main use is to get a somewhat random -// retry timeout on each ask. This can help avoid flooding a server all at once. -func newBackOff(timeout time.Duration) backoff.BackOff { - return &backoff.ExponentialBackOff{ - // First backoff timeout will be somewhere in the 100 - 300 ms range given the default multiplier. - InitialInterval: time.Millisecond * 200, - RandomizationFactor: backoff.DefaultRandomizationFactor, - Multiplier: backoff.DefaultMultiplier, - // Set the max interval to a minute, seems like a sane value. We don't know how long the server will be down for, and if we reached - // this point it's been down for quite awhile. - MaxInterval: time.Minute * 1, - // `backoff.ExponentialBackoff` treats a 0 timeout as infinite, which is ideal as it's the logic we desire. - MaxElapsedTime: timeout, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } -} - -func (nprw *nPipeRetryWriter) Write(p []byte) (n int, err error) { - var currBufPos int - for { - // p[currBufPos:] to handle a case where we wrote n bytes but got disconnected and now we just need to write the rest of the buffer. If this is the - // first write then the current position is 0 so we just try and write the whole buffer as usual. - n, err = nprw.Conn.Write(p[currBufPos:]) - currBufPos += n - if err != nil { - // If the error is one that we can discern calls for a retry, attempt to redial the pipe. - if isDisconnectedErr(err) { - // Log that we're going to retry establishing the connection. 
- log.G(nprw.ctx).WithFields(logrus.Fields{ - "address": nprw.pipePath, - logrus.ErrorKey: err, - }).Error("Named pipe disconnected, retrying dial") - - // Close the old conn first. - nprw.Conn.Close() - newConn, retryErr := nprw.retryDialPipe() - if retryErr == nil { - log.G(nprw.ctx).WithField("address", nprw.pipePath).Info("Succeeded in reconnecting to named pipe") - - nprw.Conn = newConn - continue - } - err = retryErr - } - } - return currBufPos, err - } -} - -// retryDialPipe is a helper to retry dialing a named pipe until the timeout of nprw.BackOff or a successful connection. This is mainly to -// assist in scenarios where the server end of the pipe has crashed/went away and is no longer accepting new connections but may -// come back online. The backoff used inside is to try and space out connections to the server as to not flood it all at once with connection -// attempts at the same interval. -func (nprw *nPipeRetryWriter) retryDialPipe() (net.Conn, error) { - // Reset the backoff object as it starts ticking down when it's created. This also ensures we can re-use it in the event the server goes - // away more than once. - nprw.backOff.Reset() - for { - backOffTime := nprw.backOff.NextBackOff() - // We don't simply use a context with a timeout and pass it to DialPipe because DialPipe only retries the connection (and thus makes use of - // the timeout) if it sees that the pipe is busy. If the server isn't up/not listening it will just error out immediately and not make use - // of the timeout passed. That's the case we're most likely in right now so we need our own retry logic on top. - conn, err := winio.DialPipe(nprw.pipePath, nil) - if err == nil { - return conn, nil - } - // Next backoff would go over our timeout. We've tried once more above due to the ordering of this check, but now we need to bail out. - if backOffTime == backoff.Stop { - return nil, fmt.Errorf("reached timeout while retrying dial on %s", nprw.pipePath) - } - time.Sleep(backOffTime) - } -} - -// isDisconnectedErr is a helper to determine if the error received from writing to the server end of a named pipe indicates a disconnect/severed -// connection. This can be used to attempt a redial if it's expected that the server will come back online at some point. -func isDisconnectedErr(err error) bool { - if serr, ok := err.(syscall.Errno); ok { - // Server went away/something went wrong. - return serr == windows.ERROR_NO_DATA || serr == windows.ERROR_PIPE_NOT_CONNECTED || serr == windows.ERROR_BROKEN_PIPE - } - return false -} - -var _ = (UpstreamIO)(&npipeio{}) - -type npipeio struct { - // stdin, stdout, stderr are the original paths used to open the connections. - // - // They MUST be treated as readonly in the lifetime of the pipe io. - stdin, stdout, stderr string - // terminal is the original setting passed in on open. - // - // This MUST be treated as readonly in the lifetime of the pipe io. - terminal bool - - // sin is the upstream `stdin` connection. - // - // `sin` MUST be treated as readonly in the lifetime of the pipe io after - // the return from `NewNpipeIO`. - sin io.ReadCloser - sinCloser sync.Once - - // sout and serr are the upstream `stdout` and `stderr` connections. - // - // `sout` and `serr` MUST be treated as readonly in the lifetime of the pipe - // io after the return from `NewNpipeIO`. 
- sout, serr io.WriteCloser - outErrCloser sync.Once -} - -func (nio *npipeio) Close(ctx context.Context) { - nio.sinCloser.Do(func() { - if nio.sin != nil { - log.G(ctx).Debug("npipeio::sinCloser") - nio.sin.Close() - } - }) - nio.outErrCloser.Do(func() { - if nio.sout != nil { - log.G(ctx).Debug("npipeio::outErrCloser - stdout") - nio.sout.Close() - } - if nio.serr != nil { - log.G(ctx).Debug("npipeio::outErrCloser - stderr") - nio.serr.Close() - } - }) -} - -func (nio *npipeio) CloseStdin(ctx context.Context) { - nio.sinCloser.Do(func() { - if nio.sin != nil { - log.G(ctx).Debug("npipeio::sinCloser") - nio.sin.Close() - } - }) -} - -func (nio *npipeio) Stdin() io.Reader { - return nio.sin -} - -func (nio *npipeio) StdinPath() string { - return nio.stdin -} - -func (nio *npipeio) Stdout() io.Writer { - return nio.sout -} - -func (nio *npipeio) StdoutPath() string { - return nio.stdout -} - -func (nio *npipeio) Stderr() io.Writer { - return nio.serr -} - -func (nio *npipeio) StderrPath() string { - return nio.stderr -} - -func (nio *npipeio) Terminal() bool { - return nio.terminal -} - -// CreatePipeAndListen is a helper function to create a pipe listener -// and accept connections. Returns the created pipe path on success. -// -// If `in` is true, `f` should implement io.Reader -// If `in` is false, `f` should implement io.Writer -func CreatePipeAndListen(f interface{}, in bool) (string, error) { - p, l, err := CreateNamedPipeListener() - if err != nil { - return "", err - } - go func() { - c, err := l.Accept() - if err != nil { - logrus.WithError(err).Error("failed to accept pipe") - return - } - - if in { - _, _ = io.Copy(c, f.(io.Reader)) - c.Close() - } else { - _, _ = io.Copy(f.(io.Writer), c) - } - }() - return p, nil -} - -// CreateNamedPipeListener is a helper function to create and return a pipe listener -// and it's created path. -func CreateNamedPipeListener() (string, net.Listener, error) { - g, err := guid.NewV4() - if err != nil { - return "", nil, err - } - p := `\\.\pipe\` + g.String() - l, err := winio.ListenPipe(p, nil) - if err != nil { - return "", nil, err - } - return p, l, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go deleted file mode 100644 index b94015b5aa..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go +++ /dev/null @@ -1 +0,0 @@ -package cni diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go deleted file mode 100644 index 3543a590d0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go +++ /dev/null @@ -1,112 +0,0 @@ -//go:build windows - -package cni - -import ( - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/regstate" -) - -const ( - cniRoot = "cni" - cniKey = "cfg" -) - -// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM -// map. -type PersistedNamespaceConfig struct { - namespaceID string - stored bool - - ContainerID string - HostUniqueID guid.GUID -} - -// NewPersistedNamespaceConfig creates an in-memory namespace config that can be -// persisted to the registry. 
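As a quick illustration of the lifecycle these helpers support, the sketch below creates a namespace config, persists it, reloads it by namespace ID, and removes it again; the namespace and container IDs are placeholders.

package cnidemo

import (
	"github.com/Microsoft/go-winio/pkg/guid"

	"github.com/Microsoft/hcsshim/internal/cni"
)

// roundTrip persists a namespace-to-container mapping and reads it back.
// "ns-id" and "container-id" are placeholder values.
func roundTrip() error {
	hostID, err := guid.NewV4()
	if err != nil {
		return err
	}

	pnc := cni.NewPersistedNamespaceConfig("ns-id", "container-id", hostID)
	if err := pnc.Store(); err != nil {
		return err
	}

	// Later (possibly from another process) the config can be reloaded
	// by namespace ID, and removed once the namespace is torn down.
	loaded, err := cni.LoadPersistedNamespaceConfig("ns-id")
	if err != nil {
		return err
	}
	return loaded.Remove()
}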
-func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { - return &PersistedNamespaceConfig{ - namespaceID: namespaceID, - ContainerID: containerID, - HostUniqueID: containerHostUniqueID, - } -} - -// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches -// `namespaceID`. If not found returns `regstate.NotFoundError` -func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return nil, err - } - defer sk.Close() - - pnc := PersistedNamespaceConfig{ - namespaceID: namespaceID, - stored: true, - } - if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { - return nil, err - } - return &pnc, nil -} - -// Store stores or updates the in-memory config to its registry state. If the -// store failes returns the store error. -func (pnc *PersistedNamespaceConfig) Store() error { - if pnc.namespaceID == "" { - return errors.New("invalid namespaceID ''") - } - if pnc.ContainerID == "" { - return errors.New("invalid containerID ''") - } - empty := guid.GUID{} - if pnc.HostUniqueID == empty { - return errors.New("invalid containerHostUniqueID 'empy'") - } - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return err - } - defer sk.Close() - - if pnc.stored { - if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } else { - if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } - pnc.stored = true - return nil -} - -// Remove removes any persisted state associated with this config. If the config -// is not found in the registry `Remove` returns no error. -func (pnc *PersistedNamespaceConfig) Remove() error { - if pnc.stored { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - defer sk.Close() - - if err := sk.Remove(pnc.namespaceID); err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - } - pnc.stored = false - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.pb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.pb.go deleted file mode 100644 index 5ec5e514a1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.pb.go +++ /dev/null @@ -1,2673 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto - -package computeagent - -import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type AssignPCIInternalRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - DeviceID string `protobuf:"bytes,2,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` - VirtualFunctionIndex uint32 `protobuf:"varint,3,opt,name=virtual_function_index,json=virtualFunctionIndex,proto3" json:"virtual_function_index,omitempty"` - NicID string `protobuf:"bytes,4,opt,name=nic_id,json=nicId,proto3" json:"nic_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AssignPCIInternalRequest) Reset() { *m = AssignPCIInternalRequest{} } -func (*AssignPCIInternalRequest) ProtoMessage() {} -func (*AssignPCIInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{0} -} -func (m *AssignPCIInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AssignPCIInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AssignPCIInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AssignPCIInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AssignPCIInternalRequest.Merge(m, src) -} -func (m *AssignPCIInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *AssignPCIInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AssignPCIInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AssignPCIInternalRequest proto.InternalMessageInfo - -type AssignPCIInternalResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AssignPCIInternalResponse) Reset() { *m = AssignPCIInternalResponse{} } -func (*AssignPCIInternalResponse) ProtoMessage() {} -func (*AssignPCIInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{1} -} -func (m *AssignPCIInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AssignPCIInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AssignPCIInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AssignPCIInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AssignPCIInternalResponse.Merge(m, src) -} -func (m *AssignPCIInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *AssignPCIInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AssignPCIInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AssignPCIInternalResponse proto.InternalMessageInfo - -type RemovePCIInternalRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - DeviceID string `protobuf:"bytes,2,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` - VirtualFunctionIndex uint32 `protobuf:"varint,3,opt,name=virtual_function_index,json=virtualFunctionIndex,proto3" json:"virtual_function_index,omitempty"` - XXX_NoUnkeyedLiteral 
struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemovePCIInternalRequest) Reset() { *m = RemovePCIInternalRequest{} } -func (*RemovePCIInternalRequest) ProtoMessage() {} -func (*RemovePCIInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{2} -} -func (m *RemovePCIInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemovePCIInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemovePCIInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RemovePCIInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemovePCIInternalRequest.Merge(m, src) -} -func (m *RemovePCIInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *RemovePCIInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemovePCIInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RemovePCIInternalRequest proto.InternalMessageInfo - -type RemovePCIInternalResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemovePCIInternalResponse) Reset() { *m = RemovePCIInternalResponse{} } -func (*RemovePCIInternalResponse) ProtoMessage() {} -func (*RemovePCIInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{3} -} -func (m *RemovePCIInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemovePCIInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemovePCIInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RemovePCIInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemovePCIInternalResponse.Merge(m, src) -} -func (m *RemovePCIInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *RemovePCIInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RemovePCIInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RemovePCIInternalResponse proto.InternalMessageInfo - -type AddNICInternalRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - NicID string `protobuf:"bytes,2,opt,name=nic_id,json=nicId,proto3" json:"nic_id,omitempty"` - Endpoint *types.Any `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddNICInternalRequest) Reset() { *m = AddNICInternalRequest{} } -func (*AddNICInternalRequest) ProtoMessage() {} -func (*AddNICInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{4} -} -func (m *AddNICInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddNICInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddNICInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*AddNICInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddNICInternalRequest.Merge(m, src) -} -func (m *AddNICInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *AddNICInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddNICInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddNICInternalRequest proto.InternalMessageInfo - -type AddNICInternalResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddNICInternalResponse) Reset() { *m = AddNICInternalResponse{} } -func (*AddNICInternalResponse) ProtoMessage() {} -func (*AddNICInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{5} -} -func (m *AddNICInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddNICInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddNICInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AddNICInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddNICInternalResponse.Merge(m, src) -} -func (m *AddNICInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *AddNICInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddNICInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddNICInternalResponse proto.InternalMessageInfo - -type ModifyNICInternalRequest struct { - NicID string `protobuf:"bytes,1,opt,name=nic_id,json=nicId,proto3" json:"nic_id,omitempty"` - Endpoint *types.Any `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - IovPolicySettings *IovSettings `protobuf:"bytes,3,opt,name=iov_policy_settings,json=iovPolicySettings,proto3" json:"iov_policy_settings,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModifyNICInternalRequest) Reset() { *m = ModifyNICInternalRequest{} } -func (*ModifyNICInternalRequest) ProtoMessage() {} -func (*ModifyNICInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{6} -} -func (m *ModifyNICInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ModifyNICInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ModifyNICInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ModifyNICInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModifyNICInternalRequest.Merge(m, src) -} -func (m *ModifyNICInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *ModifyNICInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ModifyNICInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ModifyNICInternalRequest proto.InternalMessageInfo - -type ModifyNICInternalResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModifyNICInternalResponse) Reset() { *m = ModifyNICInternalResponse{} } -func (*ModifyNICInternalResponse) ProtoMessage() {} -func (*ModifyNICInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, 
[]int{7} -} -func (m *ModifyNICInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ModifyNICInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ModifyNICInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ModifyNICInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModifyNICInternalResponse.Merge(m, src) -} -func (m *ModifyNICInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *ModifyNICInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ModifyNICInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ModifyNICInternalResponse proto.InternalMessageInfo - -type DeleteNICInternalRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - NicID string `protobuf:"bytes,2,opt,name=nic_id,json=nicId,proto3" json:"nic_id,omitempty"` - Endpoint *types.Any `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteNICInternalRequest) Reset() { *m = DeleteNICInternalRequest{} } -func (*DeleteNICInternalRequest) ProtoMessage() {} -func (*DeleteNICInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{8} -} -func (m *DeleteNICInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteNICInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteNICInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteNICInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNICInternalRequest.Merge(m, src) -} -func (m *DeleteNICInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteNICInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNICInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteNICInternalRequest proto.InternalMessageInfo - -type DeleteNICInternalResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteNICInternalResponse) Reset() { *m = DeleteNICInternalResponse{} } -func (*DeleteNICInternalResponse) ProtoMessage() {} -func (*DeleteNICInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{9} -} -func (m *DeleteNICInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteNICInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteNICInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteNICInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNICInternalResponse.Merge(m, src) -} -func (m *DeleteNICInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteNICInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNICInternalResponse.DiscardUnknown(m) -} 
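
For orientation while reviewing this deletion: the types above are gogo/protobuf-generated messages whose Marshal/Unmarshal methods appear later in this same deleted file. A minimal sketch of how such a generated message round-trips through those methods is below; it assumes the package is imported via the proto's go_package path (github.com/Microsoft/hcsshim/internal/computeagent, which is internal and therefore only importable from within the hcsshim module) and that the Endpoint field is a gogo types.Any, as the struct tags above indicate. The IDs are hypothetical and for illustration only.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/computeagent" // assumed import path, taken from the proto's go_package option
	"github.com/gogo/protobuf/types"                     // assumed source of the types.Any used by the Endpoint field
)

func main() {
	// Build a request matching the generated struct definitions deleted above.
	req := &computeagent.AddNICInternalRequest{
		ContainerID: "example-container", // hypothetical values for illustration
		NicID:       "example-nic",
		Endpoint:    &types.Any{TypeUrl: "example/endpoint", Value: []byte("payload")},
	}

	// Marshal goes through the generated MarshalToSizedBuffer path shown in this file.
	data, err := req.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal walks the wire format field by field, mirroring the generated Unmarshal code.
	var decoded computeagent.AddNICInternalRequest
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ContainerID, decoded.NicID)
}
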
- -var xxx_messageInfo_DeleteNICInternalResponse proto.InternalMessageInfo - -type IovSettings struct { - IovOffloadWeight uint32 `protobuf:"varint,1,opt,name=IovOffloadWeight,proto3" json:"IovOffloadWeight,omitempty"` - QueuePairsRequested uint32 `protobuf:"varint,2,opt,name=QueuePairsRequested,proto3" json:"QueuePairsRequested,omitempty"` - InterruptModeration uint32 `protobuf:"varint,3,opt,name=InterruptModeration,proto3" json:"InterruptModeration,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IovSettings) Reset() { *m = IovSettings{} } -func (*IovSettings) ProtoMessage() {} -func (*IovSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_7f2f03dc308add4c, []int{10} -} -func (m *IovSettings) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IovSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IovSettings.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IovSettings) XXX_Merge(src proto.Message) { - xxx_messageInfo_IovSettings.Merge(m, src) -} -func (m *IovSettings) XXX_Size() int { - return m.Size() -} -func (m *IovSettings) XXX_DiscardUnknown() { - xxx_messageInfo_IovSettings.DiscardUnknown(m) -} - -var xxx_messageInfo_IovSettings proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AssignPCIInternalRequest)(nil), "AssignPCIInternalRequest") - proto.RegisterType((*AssignPCIInternalResponse)(nil), "AssignPCIInternalResponse") - proto.RegisterType((*RemovePCIInternalRequest)(nil), "RemovePCIInternalRequest") - proto.RegisterType((*RemovePCIInternalResponse)(nil), "RemovePCIInternalResponse") - proto.RegisterType((*AddNICInternalRequest)(nil), "AddNICInternalRequest") - proto.RegisterType((*AddNICInternalResponse)(nil), "AddNICInternalResponse") - proto.RegisterType((*ModifyNICInternalRequest)(nil), "ModifyNICInternalRequest") - proto.RegisterType((*ModifyNICInternalResponse)(nil), "ModifyNICInternalResponse") - proto.RegisterType((*DeleteNICInternalRequest)(nil), "DeleteNICInternalRequest") - proto.RegisterType((*DeleteNICInternalResponse)(nil), "DeleteNICInternalResponse") - proto.RegisterType((*IovSettings)(nil), "IovSettings") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto", fileDescriptor_7f2f03dc308add4c) -} - -var fileDescriptor_7f2f03dc308add4c = []byte{ - // 655 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0xae, 0x0b, 0xab, 0x56, 0xb7, 0x13, 0xe0, 0x8d, 0x92, 0x64, 0x52, 0x3b, 0xf5, 0x34, 0x38, - 0x38, 0xd3, 0xc6, 0x6d, 0x48, 0xa8, 0x6b, 0x40, 0xf2, 0x61, 0x63, 0x84, 0x03, 0x3f, 0x2e, 0x55, - 0x16, 0xbb, 0xa9, 0xa5, 0xd6, 0x2e, 0x89, 0x13, 0xd1, 0x1b, 0xff, 0x08, 0x42, 0xe2, 0x8c, 0xc4, - 0xbf, 0xb1, 0x23, 0xdc, 0x38, 0x55, 0x2c, 0xfc, 0x23, 0x28, 0x3f, 0x96, 0x0d, 0x48, 0xa4, 0x21, - 0x71, 0x80, 0x9b, 0xfd, 0x9e, 0xfd, 0xf2, 0x7d, 0xef, 0x7d, 0xfe, 0x02, 0x1f, 0x79, 0x5c, 0x4d, - 0xc2, 0x13, 0xec, 0xca, 0x99, 0x79, 0xc8, 0x5d, 0x5f, 0x06, 0x72, 0xac, 0xcc, 0x89, 0x1b, 0x04, - 0x13, 0x3e, 0x33, 0xb9, 0x50, 0xcc, 0x17, 0xce, 0xd4, 0x74, 0xe5, 0x6c, 0x1e, 0x2a, 0xe6, 0x78, - 0x4c, 0xa8, 0x9f, 0x36, 0x78, 0xee, 0x4b, 0x25, 0x8d, 0x0d, 0x4f, 0x7a, 0x32, 0x5d, 0x9a, 0xc9, - 0x2a, 0x8f, 0xea, 0x9e, 0x94, 
0xde, 0x94, 0x99, 0xe9, 0xee, 0x24, 0x1c, 0x9b, 0x8e, 0x58, 0x64, - 0xa9, 0xfe, 0x17, 0x00, 0xb5, 0x41, 0x10, 0x70, 0x4f, 0x1c, 0x0f, 0x09, 0xc9, 0x3f, 0x63, 0xb3, - 0xd7, 0x21, 0x0b, 0x14, 0xda, 0x85, 0x6d, 0x57, 0x0a, 0xe5, 0x70, 0xc1, 0xfc, 0x11, 0xa7, 0x1a, - 0xd8, 0x02, 0xdb, 0xcd, 0x83, 0x1b, 0xf1, 0xb2, 0xd7, 0x1a, 0x9e, 0xc7, 0x89, 0x65, 0xb7, 0x8a, - 0x43, 0x84, 0xa2, 0xbb, 0xb0, 0x49, 0x59, 0xc4, 0x5d, 0x96, 0x5c, 0xa8, 0xa7, 0x17, 0xda, 0xf1, - 0xb2, 0xb7, 0x6a, 0xa5, 0x41, 0x62, 0xd9, 0xab, 0x59, 0x9a, 0x50, 0x74, 0x1f, 0x76, 0x22, 0xee, - 0xab, 0xd0, 0x99, 0x8e, 0xc6, 0xa1, 0x70, 0x15, 0x97, 0x62, 0xc4, 0x05, 0x65, 0x6f, 0xb4, 0x6b, - 0x5b, 0x60, 0x7b, 0xcd, 0xde, 0xc8, 0xb3, 0x8f, 0xf3, 0x24, 0x49, 0x72, 0x68, 0x0b, 0x36, 0x04, - 0x77, 0x93, 0xea, 0xd7, 0xd3, 0xea, 0xcd, 0x78, 0xd9, 0x5b, 0x39, 0xe2, 0x2e, 0xb1, 0xec, 0x15, - 0xc1, 0x5d, 0x42, 0xfb, 0x7b, 0x50, 0x2f, 0xa1, 0x14, 0xcc, 0xa5, 0x08, 0x18, 0xea, 0xc0, 0x7a, - 0xc1, 0xa4, 0x11, 0x2f, 0x7b, 0x75, 0x62, 0xd9, 0x75, 0x4e, 0xfb, 0x9f, 0x00, 0xd4, 0x6c, 0x36, - 0x93, 0x11, 0xfb, 0x4f, 0x1a, 0xd1, 0xdf, 0x84, 0x7a, 0x09, 0xe0, 0x8c, 0x66, 0xff, 0x3d, 0x80, - 0xb7, 0x07, 0x94, 0x1e, 0x91, 0xe1, 0xdf, 0xe0, 0x72, 0xd1, 0xf3, 0x7a, 0x79, 0xcf, 0xd1, 0x0e, - 0x5c, 0x65, 0x82, 0xce, 0x25, 0x17, 0x2a, 0x05, 0xdd, 0xda, 0xdd, 0xc0, 0x99, 0xea, 0xf0, 0xb9, - 0xea, 0xf0, 0x40, 0x2c, 0xec, 0xe2, 0x54, 0x5f, 0x83, 0x9d, 0x5f, 0x01, 0xe6, 0xd8, 0x3f, 0x02, - 0xa8, 0x1d, 0x4a, 0xca, 0xc7, 0x8b, 0x12, 0xf8, 0x17, 0x50, 0xc0, 0x15, 0xa0, 0xd4, 0xaf, 0x02, - 0x05, 0x3d, 0x80, 0xeb, 0x5c, 0x46, 0xa3, 0xb9, 0x9c, 0x72, 0x77, 0x31, 0x0a, 0x98, 0x52, 0x5c, - 0x78, 0x41, 0xce, 0xa3, 0x8d, 0x89, 0x8c, 0x9e, 0xe5, 0x31, 0xfb, 0x16, 0x97, 0xd1, 0x71, 0x7a, - 0xee, 0x3c, 0x94, 0xcc, 0xa1, 0x04, 0x6d, 0xce, 0xe5, 0x03, 0x80, 0x9a, 0xc5, 0xa6, 0x4c, 0xb1, - 0x7f, 0x78, 0x14, 0x9b, 0x50, 0x2f, 0xc1, 0x98, 0x33, 0x78, 0x07, 0x60, 0xeb, 0x52, 0x07, 0xd0, - 0x3d, 0x78, 0x93, 0xc8, 0xe8, 0xc9, 0x78, 0x3c, 0x95, 0x0e, 0x7d, 0xce, 0xb8, 0x37, 0x51, 0x29, - 0xf0, 0x35, 0xfb, 0xb7, 0x38, 0xda, 0x81, 0xeb, 0x4f, 0x43, 0x16, 0xb2, 0x63, 0x87, 0xfb, 0x41, - 0xce, 0x9a, 0x65, 0xc8, 0xd7, 0xec, 0xb2, 0x54, 0x72, 0x23, 0x45, 0xe0, 0x87, 0x73, 0x75, 0x28, - 0x29, 0xf3, 0x9d, 0x44, 0xf0, 0xf9, 0x3b, 0x28, 0x4b, 0xed, 0x7e, 0xaf, 0xc3, 0xf6, 0x30, 0x73, - 0xc2, 0x41, 0xe2, 0x84, 0x68, 0x1f, 0x36, 0x32, 0x61, 0xa1, 0x0e, 0x2e, 0x7d, 0x02, 0xc6, 0x1d, - 0x5c, 0xa1, 0xbc, 0x1a, 0xb2, 0x60, 0xb3, 0x18, 0x26, 0xd2, 0x71, 0x95, 0x0c, 0x0d, 0x03, 0x57, - 0xcf, 0x3c, 0xad, 0x52, 0x34, 0x14, 0xe9, 0xb8, 0x4a, 0x00, 0x86, 0x81, 0xab, 0xfb, 0x9e, 0x56, - 0x29, 0x7c, 0x0c, 0xe9, 0xb8, 0xca, 0xa6, 0x0d, 0x03, 0x57, 0xda, 0x5d, 0x56, 0xa5, 0xb0, 0x09, - 0xa4, 0xe3, 0x2a, 0x8f, 0x33, 0x0c, 0x5c, 0xed, 0x26, 0xb5, 0x83, 0x97, 0xa7, 0x67, 0xdd, 0xda, - 0xd7, 0xb3, 0x6e, 0xed, 0x6d, 0xdc, 0x05, 0xa7, 0x71, 0x17, 0x7c, 0x8e, 0xbb, 0xe0, 0x5b, 0xdc, - 0x05, 0xaf, 0x1e, 0xfe, 0xf9, 0x9f, 0x6b, 0xff, 0xf2, 0xe6, 0x45, 0xed, 0xa4, 0x91, 0xea, 0x72, - 0xef, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x25, 0x88, 0x14, 0x05, 0x07, 0x00, 0x00, -} - -func (m *AssignPCIInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AssignPCIInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AssignPCIInternalRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.NicID) > 0 { - i -= len(m.NicID) - copy(dAtA[i:], m.NicID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.NicID))) - i-- - dAtA[i] = 0x22 - } - if m.VirtualFunctionIndex != 0 { - i = encodeVarintComputeagent(dAtA, i, uint64(m.VirtualFunctionIndex)) - i-- - dAtA[i] = 0x18 - } - if len(m.DeviceID) > 0 { - i -= len(m.DeviceID) - copy(dAtA[i:], m.DeviceID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.DeviceID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AssignPCIInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AssignPCIInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AssignPCIInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemovePCIInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemovePCIInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemovePCIInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.VirtualFunctionIndex != 0 { - i = encodeVarintComputeagent(dAtA, i, uint64(m.VirtualFunctionIndex)) - i-- - dAtA[i] = 0x18 - } - if len(m.DeviceID) > 0 { - i -= len(m.DeviceID) - copy(dAtA[i:], m.DeviceID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.DeviceID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemovePCIInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemovePCIInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemovePCIInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AddNICInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddNICInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddNICInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Endpoint != nil { - { - size, err := m.Endpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintComputeagent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.NicID) > 0 { - i -= len(m.NicID) - copy(dAtA[i:], m.NicID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.NicID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AddNICInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddNICInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddNICInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ModifyNICInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ModifyNICInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ModifyNICInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.IovPolicySettings != nil { - { - size, err := m.IovPolicySettings.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintComputeagent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Endpoint != nil { - { - size, err := m.Endpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintComputeagent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.NicID) > 0 { - i -= len(m.NicID) - copy(dAtA[i:], m.NicID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.NicID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ModifyNICInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ModifyNICInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ModifyNICInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *DeleteNICInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteNICInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteNICInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Endpoint != nil { - { - size, err := m.Endpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintComputeagent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.NicID) > 0 { - i -= len(m.NicID) - copy(dAtA[i:], m.NicID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.NicID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintComputeagent(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteNICInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteNICInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteNICInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *IovSettings) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IovSettings) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IovSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.InterruptModeration != 0 { - i = encodeVarintComputeagent(dAtA, i, uint64(m.InterruptModeration)) - i-- - dAtA[i] = 0x18 - } - if m.QueuePairsRequested != 0 { - i = encodeVarintComputeagent(dAtA, i, uint64(m.QueuePairsRequested)) - i-- - dAtA[i] = 0x10 - } - if m.IovOffloadWeight != 0 { - i = encodeVarintComputeagent(dAtA, i, uint64(m.IovOffloadWeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintComputeagent(dAtA []byte, offset int, v uint64) int { - offset -= sovComputeagent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AssignPCIInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - l = len(m.DeviceID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.VirtualFunctionIndex != 0 { - n += 1 + sovComputeagent(uint64(m.VirtualFunctionIndex)) - } - l = 
len(m.NicID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AssignPCIInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RemovePCIInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - l = len(m.DeviceID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.VirtualFunctionIndex != 0 { - n += 1 + sovComputeagent(uint64(m.VirtualFunctionIndex)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RemovePCIInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AddNICInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - l = len(m.NicID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.Endpoint != nil { - l = m.Endpoint.Size() - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AddNICInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ModifyNICInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.NicID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.Endpoint != nil { - l = m.Endpoint.Size() - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.IovPolicySettings != nil { - l = m.IovPolicySettings.Size() - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ModifyNICInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteNICInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - l = len(m.NicID) - if l > 0 { - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.Endpoint != nil { - l = m.Endpoint.Size() - n += 1 + l + sovComputeagent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteNICInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *IovSettings) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IovOffloadWeight != 0 { - n += 1 + sovComputeagent(uint64(m.IovOffloadWeight)) - } - if m.QueuePairsRequested != 0 { - n += 1 + sovComputeagent(uint64(m.QueuePairsRequested)) - } - if m.InterruptModeration != 0 { - n += 1 + sovComputeagent(uint64(m.InterruptModeration)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovComputeagent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozComputeagent(x 
uint64) (n int) { - return sovComputeagent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AssignPCIInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AssignPCIInternalRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `DeviceID:` + fmt.Sprintf("%v", this.DeviceID) + `,`, - `VirtualFunctionIndex:` + fmt.Sprintf("%v", this.VirtualFunctionIndex) + `,`, - `NicID:` + fmt.Sprintf("%v", this.NicID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AssignPCIInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AssignPCIInternalResponse{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RemovePCIInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemovePCIInternalRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `DeviceID:` + fmt.Sprintf("%v", this.DeviceID) + `,`, - `VirtualFunctionIndex:` + fmt.Sprintf("%v", this.VirtualFunctionIndex) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RemovePCIInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemovePCIInternalResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AddNICInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddNICInternalRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `NicID:` + fmt.Sprintf("%v", this.NicID) + `,`, - `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AddNICInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddNICInternalResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ModifyNICInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ModifyNICInternalRequest{`, - `NicID:` + fmt.Sprintf("%v", this.NicID) + `,`, - `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Any", "types.Any", 1) + `,`, - `IovPolicySettings:` + strings.Replace(this.IovPolicySettings.String(), "IovSettings", "IovSettings", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ModifyNICInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ModifyNICInternalResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteNICInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteNICInternalRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `NicID:` + fmt.Sprintf("%v", this.NicID) + `,`, - `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s 
-} -func (this *DeleteNICInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteNICInternalResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *IovSettings) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IovSettings{`, - `IovOffloadWeight:` + fmt.Sprintf("%v", this.IovOffloadWeight) + `,`, - `QueuePairsRequested:` + fmt.Sprintf("%v", this.QueuePairsRequested) + `,`, - `InterruptModeration:` + fmt.Sprintf("%v", this.InterruptModeration) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringComputeagent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type ComputeAgentService interface { - AddNIC(ctx context.Context, req *AddNICInternalRequest) (*AddNICInternalResponse, error) - ModifyNIC(ctx context.Context, req *ModifyNICInternalRequest) (*ModifyNICInternalResponse, error) - DeleteNIC(ctx context.Context, req *DeleteNICInternalRequest) (*DeleteNICInternalResponse, error) - AssignPCI(ctx context.Context, req *AssignPCIInternalRequest) (*AssignPCIInternalResponse, error) - RemovePCI(ctx context.Context, req *RemovePCIInternalRequest) (*RemovePCIInternalResponse, error) -} - -func RegisterComputeAgentService(srv *github_com_containerd_ttrpc.Server, svc ComputeAgentService) { - srv.Register("ComputeAgent", map[string]github_com_containerd_ttrpc.Method{ - "AddNIC": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req AddNICInternalRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.AddNIC(ctx, &req) - }, - "ModifyNIC": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ModifyNICInternalRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ModifyNIC(ctx, &req) - }, - "DeleteNIC": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req DeleteNICInternalRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DeleteNIC(ctx, &req) - }, - "AssignPCI": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req AssignPCIInternalRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.AssignPCI(ctx, &req) - }, - "RemovePCI": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req RemovePCIInternalRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.RemovePCI(ctx, &req) - }, - }) -} - -type computeAgentClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewComputeAgentClient(client *github_com_containerd_ttrpc.Client) ComputeAgentService { - return &computeAgentClient{ - client: client, - } -} - -func (c *computeAgentClient) AddNIC(ctx context.Context, req *AddNICInternalRequest) (*AddNICInternalResponse, error) { - var resp AddNICInternalResponse - if err := c.client.Call(ctx, "ComputeAgent", "AddNIC", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *computeAgentClient) ModifyNIC(ctx context.Context, req *ModifyNICInternalRequest) (*ModifyNICInternalResponse, error) { - var resp ModifyNICInternalResponse - if err := c.client.Call(ctx, "ComputeAgent", 
"ModifyNIC", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *computeAgentClient) DeleteNIC(ctx context.Context, req *DeleteNICInternalRequest) (*DeleteNICInternalResponse, error) { - var resp DeleteNICInternalResponse - if err := c.client.Call(ctx, "ComputeAgent", "DeleteNIC", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *computeAgentClient) AssignPCI(ctx context.Context, req *AssignPCIInternalRequest) (*AssignPCIInternalResponse, error) { - var resp AssignPCIInternalResponse - if err := c.client.Call(ctx, "ComputeAgent", "AssignPCI", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *computeAgentClient) RemovePCI(ctx context.Context, req *RemovePCIInternalRequest) (*RemovePCIInternalResponse, error) { - var resp RemovePCIInternalResponse - if err := c.client.Call(ctx, "ComputeAgent", "RemovePCI", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *AssignPCIInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AssignPCIInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AssignPCIInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeviceID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualFunctionIndex", wireType) - } - m.VirtualFunctionIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VirtualFunctionIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NicID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NicID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AssignPCIInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AssignPCIInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AssignPCIInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemovePCIInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemovePCIInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemovePCIInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeviceID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field VirtualFunctionIndex", wireType) - } - m.VirtualFunctionIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.VirtualFunctionIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemovePCIInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemovePCIInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemovePCIInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddNICInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddNICInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddNICInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NicID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NicID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Endpoint == nil { - m.Endpoint = &types.Any{} - } - if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddNICInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddNICInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddNICInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ModifyNICInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ModifyNICInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ModifyNICInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NicID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NicID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Endpoint == nil { - m.Endpoint = &types.Any{} - } - if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IovPolicySettings", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IovPolicySettings == nil { - m.IovPolicySettings = &IovSettings{} - } - if err := m.IovPolicySettings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ModifyNICInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ModifyNICInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ModifyNICInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteNICInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteNICInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteNICInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NicID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NicID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthComputeagent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthComputeagent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Endpoint == nil { - m.Endpoint = &types.Any{} - } - if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteNICInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteNICInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteNICInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IovSettings) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IovSettings: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IovSettings: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IovOffloadWeight", wireType) - } - m.IovOffloadWeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IovOffloadWeight |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueuePairsRequested", wireType) - } - m.QueuePairsRequested = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueuePairsRequested |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InterruptModeration", wireType) - } - m.InterruptModeration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowComputeagent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InterruptModeration |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipComputeagent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthComputeagent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipComputeagent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowComputeagent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowComputeagent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowComputeagent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthComputeagent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupComputeagent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthComputeagent - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthComputeagent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowComputeagent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupComputeagent = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto b/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto deleted file mode 100644 index 51127ffd01..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/computeagent.proto +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/Microsoft/hcsshim/internal/computeagent;computeagent"; - -import weak "gogoproto/gogo.proto"; -import "google/protobuf/any.proto"; - -service ComputeAgent{ - rpc AddNIC(AddNICInternalRequest) returns (AddNICInternalResponse) {} - rpc ModifyNIC(ModifyNICInternalRequest) returns (ModifyNICInternalResponse) {} - rpc DeleteNIC(DeleteNICInternalRequest) returns (DeleteNICInternalResponse) {} - rpc AssignPCI(AssignPCIInternalRequest) returns (AssignPCIInternalResponse) {} - rpc RemovePCI(RemovePCIInternalRequest) returns (RemovePCIInternalResponse) {} -} - -message AssignPCIInternalRequest { - string container_id = 1; - string device_id = 2; - uint32 virtual_function_index = 3; - string nic_id = 4; -} - -message AssignPCIInternalResponse { - string id = 1; -} - -message RemovePCIInternalRequest { - string container_id = 1; - string device_id = 2; - uint32 virtual_function_index = 3; -} - -message RemovePCIInternalResponse {} - -message AddNICInternalRequest { - string container_id = 1; - string nic_id = 2; - google.protobuf.Any endpoint = 3; -} - -message AddNICInternalResponse {} - -message ModifyNICInternalRequest { - string nic_id = 1; - google.protobuf.Any endpoint = 2; - IovSettings iov_policy_settings = 3; -} - -message ModifyNICInternalResponse {} - -message DeleteNICInternalRequest { - string container_id = 1; - string nic_id = 2; - google.protobuf.Any endpoint = 3; -} 
- -message DeleteNICInternalResponse {} - -message IovSettings { - uint32 IovOffloadWeight = 1; - uint32 QueuePairsRequested = 2; - uint32 InterruptModeration = 3; -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/doc.go deleted file mode 100644 index 7df98b60c0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/computeagent/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package computeagent contains the proto and compiled go files for the compute -// agent service. -// -// A mock service under `mock` is used for unit testing the various services -// used for ncproxy. -// -// The mock service is compiled using the following command: -// -// mockgen -source="computeagent.pb.go" -package="computeagent_mock" > mock\computeagent_mock.pb.go -package computeagent diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go b/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go deleted file mode 100644 index ea3e65b543..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build windows - -package copyfile - -import ( - "context" - "fmt" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procCopyFileW = modkernel32.NewProc("CopyFileW") -) - -// CopyFile is a utility for copying a file using CopyFileW win32 API for -// performance. -func CopyFile(ctx context.Context, srcFile, destFile string, overwrite bool) (err error) { - ctx, span := oc.StartSpan(ctx, "copyfile::CopyFile") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("srcFile", srcFile), - trace.StringAttribute("destFile", destFile), - trace.BoolAttribute("overwrite", overwrite)) - - var bFailIfExists uint32 = 1 - if overwrite { - bFailIfExists = 0 - } - - lpExistingFileName, err := syscall.UTF16PtrFromString(srcFile) - if err != nil { - return err - } - lpNewFileName, err := syscall.UTF16PtrFromString(destFile) - if err != nil { - return err - } - r1, _, err := syscall.Syscall( - procCopyFileW.Addr(), - 3, - uintptr(unsafe.Pointer(lpExistingFileName)), - uintptr(unsafe.Pointer(lpNewFileName)), - uintptr(bFailIfExists)) - if r1 == 0 { - return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/doc.go deleted file mode 100644 index a2812a6ee8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/copyfile/doc.go +++ /dev/null @@ -1 +0,0 @@ -package copyfile diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go deleted file mode 100644 index c6eeb167b9..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build windows - -package cow - -import ( - "context" - "io" - - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// Process is the interface for an OS process running in a container or utility VM. 
-type Process interface { - // Close releases resources associated with the process and closes the - // writer and readers returned by Stdio. Depending on the implementation, - // this may also terminate the process. - Close() error - // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever - // is appropriate to indicate that no more data is available. - CloseStdin(ctx context.Context) error - // CloseStdout closes the stdout connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStdout(ctx context.Context) error - // CloseStderr closes the stderr connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStderr(ctx context.Context) error - // Pid returns the process ID. - Pid() int - // Stdio returns the stdio streams for a process. These may be nil if a stream - // was not requested during CreateProcess. - Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) - // ResizeConsole resizes the virtual terminal associated with the process. - ResizeConsole(ctx context.Context, width, height uint16) error - // Kill sends a SIGKILL or equivalent signal to the process and returns whether - // the signal was delivered. It does not wait for the process to terminate. - Kill(ctx context.Context) (bool, error) - // Signal sends a signal to the process and returns whether the signal was - // delivered. The input is OS specific (either - // guestrequest.SignalProcessOptionsWCOW or - // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process - // to terminate. - Signal(ctx context.Context, options interface{}) (bool, error) - // Wait waits for the process to complete, or for a connection to the process to be - // terminated by some error condition (including calling Close). - Wait() error - // ExitCode returns the exit code of the process. Returns an error if the process is - // not running. - ExitCode() (int, error) -} - -// ProcessHost is the interface for creating processes. -type ProcessHost interface { - // CreateProcess creates a process. The configuration is host specific - // (either hcsschema.ProcessParameters or lcow.ProcessParameters). - CreateProcess(ctx context.Context, config interface{}) (Process, error) - // OS returns the host's operating system, "linux" or "windows". - OS() string - // IsOCI specifies whether this is an OCI-compliant process host. If true, - // then the configuration passed to CreateProcess should have an OCI process - // spec (or nil if this is the initial process in an OCI container). - // Otherwise, it should have the HCS-specific process parameters. - IsOCI() bool -} - -// Container is the interface for container objects, either running on the host or -// in a utility VM. -type Container interface { - ProcessHost - // Close releases the resources associated with the container. Depending on - // the implementation, this may also terminate the container. - Close() error - // ID returns the container ID. - ID() string - // Properties returns the requested container properties targeting a V1 schema container. - Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) - // PropertiesV2 returns the requested container properties targeting a V2 schema container. - PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) - // Start starts a container. 
- Start(ctx context.Context) error - // Shutdown sends a shutdown request to the container (but does not wait for - // the shutdown to complete). - Shutdown(ctx context.Context) error - // Terminate sends a terminate request to the container (but does not wait - // for the terminate to complete). - Terminate(ctx context.Context) error - // Wait waits for the container to terminate, or for the connection to the - // container to be terminated by some error condition (including calling - // Close). - Wait() error - // Modify sends a request to modify container resources - Modify(ctx context.Context, config interface{}) error -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/cpugroup.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/cpugroup.go deleted file mode 100644 index 3abaa9c439..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/cpugroup.go +++ /dev/null @@ -1,78 +0,0 @@ -//go:build windows - -package cpugroup - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/pkg/errors" -) - -const NullGroupID = "00000000-0000-0000-0000-000000000000" - -// ErrHVStatusInvalidCPUGroupState corresponds to the internal error code for HV_STATUS_INVALID_CPU_GROUP_STATE -var ErrHVStatusInvalidCPUGroupState = errors.New("The hypervisor could not perform the operation because the CPU group is entering or in an invalid state.") - -// Delete deletes the cpugroup from the host -func Delete(ctx context.Context, id string) error { - operation := hcsschema.DeleteGroup - details := hcsschema.DeleteGroupOperation{ - GroupId: id, - } - - return modifyCPUGroupRequest(ctx, operation, details) -} - -// modifyCPUGroupRequest is a helper function for making modify calls to a cpugroup -func modifyCPUGroupRequest(ctx context.Context, operation hcsschema.CPUGroupOperation, details interface{}) error { - req := hcsschema.ModificationRequest{ - PropertyType: hcsschema.PTCPUGroup, - Settings: &hcsschema.HostProcessorModificationRequest{ - Operation: operation, - OperationDetails: details, - }, - } - - return hcs.ModifyServiceSettings(ctx, req) -} - -// Create creates a new cpugroup on the host with a prespecified id -func Create(ctx context.Context, id string, logicalProcessors []uint32) error { - operation := hcsschema.CreateGroup - details := &hcsschema.CreateGroupOperation{ - GroupId: strings.ToLower(id), - LogicalProcessors: logicalProcessors, - LogicalProcessorCount: uint32(len(logicalProcessors)), - } - if err := modifyCPUGroupRequest(ctx, operation, details); err != nil { - return errors.Wrapf(err, "failed to make cpugroups CreateGroup request for details %+v", details) - } - return nil -} - -// GetCPUGroupConfig finds the cpugroup config information for group with `id` -func GetCPUGroupConfig(ctx context.Context, id string) (*hcsschema.CpuGroupConfig, error) { - query := hcsschema.PropertyQuery{ - PropertyTypes: []hcsschema.PropertyType{hcsschema.PTCPUGroup}, - } - cpuGroupsPresent, err := hcs.GetServiceProperties(ctx, query) - if err != nil { - return nil, err - } - groupConfigs := &hcsschema.CpuGroupConfigurations{} - if err := json.Unmarshal(cpuGroupsPresent.Properties[0], groupConfigs); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal host cpugroups") - } - - for _, c := range groupConfigs.CpuGroups { - if strings.EqualFold(c.GroupId, id) { - return &c, nil - } - } - return nil, fmt.Errorf("no cpugroup 
exists with id %v", id) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/doc.go deleted file mode 100644 index a2c3357977..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/cpugroup/doc.go +++ /dev/null @@ -1 +0,0 @@ -package cpugroup diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/credentials.go b/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/credentials.go deleted file mode 100644 index d9ec9a3490..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/credentials.go +++ /dev/null @@ -1,130 +0,0 @@ -//go:build windows -// +build windows - -package credentials - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" -) - -// Container Credential Guard is in HCS's own words "The solution to -// allowing windows containers to have access to domain credentials for the -// applications running in their corresponding guest." It essentially acts as -// a way to temporarily Active Directory join a given container with a Group -// Managed Service Account (GMSA for short) credential specification. -// CCG will launch a process in the host that will act as a middleman for the -// credential passthrough logic. The guest is then configured through registry -// keys to have access to the process in the host. -// A CCG instance needs to be created through various HCS calls and then added to -// the V2 schema container document before being sent to HCS. For V1 HCS schema containers -// setting up instances manually is not needed, the GMSA credential specification -// simply needs to be present in the V1 container document. - -// CCGResource stores the id used when creating a ccg instance. Used when -// closing a container to be able to release the instance. -type CCGResource struct { - // ID of container that instance belongs to. - id string -} - -// Release calls into hcs to remove the ccg instance for the container matching CCGResource.id. -// These do not get cleaned up automatically they MUST be explicitly removed with a call to -// ModifyServiceSettings. The instances will persist unless vmcompute.exe exits or they are removed -// manually as done here. -func (ccgResource *CCGResource) Release(ctx context.Context) error { - if err := removeCredentialGuard(ctx, ccgResource.id); err != nil { - return fmt.Errorf("failed to remove container credential guard instance: %s", err) - } - return nil -} - -// CreateCredentialGuard creates a container credential guard instance and -// returns the state object to be placed in a v2 container doc. -func CreateCredentialGuard(ctx context.Context, id, credSpec string, hypervisorIsolated bool) (*hcsschema.ContainerCredentialGuardInstance, *CCGResource, error) { - log.G(ctx).WithField("containerID", id).Debug("creating container credential guard instance") - // V2 schema ccg setup a little different as its expected to be passed - // through all the way to the gcs. Can no longer be enabled just through - // a single property. The flow is as follows - // ------------------------------------------------------------------------ - // 1. Call HcsModifyServiceSettings with a ModificationRequest set with a - // ContainerCredentialGuardAddInstanceRequest. This is where the cred spec - // gets passed in. 
Transport either "LRPC" (Argon) or "HvSocket" (Xenon). - // - // 2. Query the instance with a call to HcsGetServiceProperties with the - // PropertyType "ContainerCredentialGuard". This will return all instances - // - // 3. Parse for the id of our container to find which one correlates to the - // container we're building the doc for, then add to the V2 doc. - // - // 4. If xenon container the CCG instance with the Hvsocket service table - // information is expected to be in the Utility VMs doc before being sent - // to HCS for creation. For pod scenarios currently we don't have the OCI - // spec of a container at UVM creation time, therefore the service table entry - // for the CCG instance will have to be hot added. - transport := "LRPC" - if hypervisorIsolated { - transport = "HvSocket" - } - req := hcsschema.ModificationRequest{ - PropertyType: hcsschema.PTContainerCredentialGuard, - Settings: &hcsschema.ContainerCredentialGuardOperationRequest{ - Operation: hcsschema.AddInstance, - OperationDetails: &hcsschema.ContainerCredentialGuardAddInstanceRequest{ - Id: id, - CredentialSpec: credSpec, - Transport: transport, - }, - }, - } - if err := hcs.ModifyServiceSettings(ctx, req); err != nil { - return nil, nil, fmt.Errorf("failed to generate container credential guard instance: %s", err) - } - - q := hcsschema.PropertyQuery{ - PropertyTypes: []hcsschema.PropertyType{hcsschema.PTContainerCredentialGuard}, - } - serviceProps, err := hcs.GetServiceProperties(ctx, q) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve container credential guard instances: %s", err) - } - if len(serviceProps.Properties) != 1 { - return nil, nil, errors.New("wrong number of service properties present") - } - - ccgSysInfo := &hcsschema.ContainerCredentialGuardSystemInfo{} - if err := json.Unmarshal(serviceProps.Properties[0], ccgSysInfo); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal container credential guard instances: %s", err) - } - for _, ccgInstance := range ccgSysInfo.Instances { - if ccgInstance.Id == id { - ccgResource := &CCGResource{ - id, - } - return &ccgInstance, ccgResource, nil - } - } - return nil, nil, fmt.Errorf("failed to find credential guard instance with container ID %s", id) -} - -// Removes a ContainerCredentialGuard instance by container ID. -func removeCredentialGuard(ctx context.Context, id string) error { - log.G(ctx).WithField("containerID", id).Debug("removing container credential guard") - - req := hcsschema.ModificationRequest{ - PropertyType: hcsschema.PTContainerCredentialGuard, - Settings: &hcsschema.ContainerCredentialGuardOperationRequest{ - Operation: hcsschema.RemoveInstance, - OperationDetails: &hcsschema.ContainerCredentialGuardRemoveInstanceRequest{ - Id: id, - }, - }, - } - return hcs.ModifyServiceSettings(ctx, req) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/doc.go deleted file mode 100644 index cbf23ed082..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/credentials/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package credentials holds the necessary structs and functions for adding -// and removing Container Credential Guard instances (shortened to CCG -// normally) for V2 HCS schema containers. 
-package credentials diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/assigned_devices.go b/test/vendor/github.com/Microsoft/hcsshim/internal/devices/assigned_devices.go deleted file mode 100644 index 4750776403..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/assigned_devices.go +++ /dev/null @@ -1,107 +0,0 @@ -//go:build windows -// +build windows - -package devices - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/pkg/errors" -) - -// AddDevice is the api exposed to hcsoci to handle assigning a device on a UVM -// -// `idType` refers to the specified device's type, supported types here are `VPCIDeviceIDType` -// and `VPCIDeviceIDTypeLegacy`. -// -// `deviceID` refers to the specified device's identifier. This must refer to a device instance id -// for hyper-v isolated device assignment. -// -// `deviceUtilPath` refers to the path in the UVM of the device-util tool used for finding the given -// device's location path(s). -// -// Returns the allocated vpci device in `vpci` to be tracked for release by the caller. On failure in -// this function, `vpci` is released and nil is returned for that value. -// -// Returns a slice of strings representing the resulting location path(s) for the specified device. -func AddDevice(ctx context.Context, vm *uvm.UtilityVM, idType, deviceID string, index uint16, deviceUtilPath string) (vpci *uvm.VPCIDevice, locationPaths []string, err error) { - defer func() { - if err != nil && vpci != nil { - // best effort clean up allocated resource on failure - if releaseErr := vpci.Release(ctx); releaseErr != nil { - log.G(ctx).WithError(releaseErr).Error("failed to release container resource") - } - vpci = nil - } - }() - if idType == uvm.VPCIDeviceIDType || idType == uvm.VPCIDeviceIDTypeLegacy { - vpci, err = vm.AssignDevice(ctx, deviceID, index, "") - if err != nil { - return vpci, nil, errors.Wrapf(err, "failed to assign device %s of type %s to pod %s", deviceID, idType, vm.ID()) - } - vmBusInstanceID := vm.GetAssignedDeviceVMBUSInstanceID(vpci.VMBusGUID) - log.G(ctx).WithField("vmbus id", vmBusInstanceID).Info("vmbus instance ID") - - locationPaths, err = getChildrenDeviceLocationPaths(ctx, vm, vmBusInstanceID, deviceUtilPath) - return vpci, locationPaths, err - } - - return vpci, nil, fmt.Errorf("device type %s for device %s is not supported in windows", idType, deviceID) -} - -// getChildrenDeviceLocationPaths queries the UVM with the device-util tool with the formatted -// parent bus device for the children devices' location paths from the uvm's view. 
-// Returns a slice of strings representing the resulting children location paths -func getChildrenDeviceLocationPaths(ctx context.Context, vm *uvm.UtilityVM, vmBusInstanceID string, deviceUtilPath string) ([]string, error) { - p, l, err := cmd.CreateNamedPipeListener() - if err != nil { - return nil, err - } - defer l.Close() - - var pipeResults []string - errChan := make(chan error) - - go readCsPipeOutput(l, errChan, &pipeResults) - - args := createDeviceUtilChildrenCommand(deviceUtilPath, vmBusInstanceID) - cmdReq := &cmd.CmdProcessRequest{ - Args: args, - Stdout: p, - } - exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq) - if err != nil { - return nil, errors.Wrapf(err, "failed to find devices with exit code %d", exitCode) - } - - // wait to finish parsing stdout results - select { - case err := <-errChan: - if err != nil { - return nil, err - } - case <-ctx.Done(): - return nil, ctx.Err() - } - - return pipeResults, nil -} - -// createDeviceUtilChildrenCommand constructs a device-util command to query the UVM for -// device information -// -// `deviceUtilPath` is the UVM path to device-util -// -// `vmBusInstanceID` is a string of the vmbus instance ID already assigned to the UVM -// -// Returns a slice of strings that represent the location paths in the UVM of the -// target devices -func createDeviceUtilChildrenCommand(deviceUtilPath string, vmBusInstanceID string) []string { - parentIDsFlag := fmt.Sprintf("--parentID=%s", vmBusInstanceID) - args := []string{deviceUtilPath, "children", parentIDsFlag, "--property=location"} - return args -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/devices/doc.go deleted file mode 100644 index c1c721e298..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/doc.go +++ /dev/null @@ -1 +0,0 @@ -package devices diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/drivers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/devices/drivers.go deleted file mode 100644 index 0dae33c963..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/drivers.go +++ /dev/null @@ -1,96 +0,0 @@ -//go:build windows -// +build windows - -package devices - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/pkg/errors" -) - -// InstallKernelDriver mounts a specified kernel driver, then installs it in the UVM. -// -// `driver` is a directory path on the host that contains driver files for standard installation. -// For windows this means files for pnp installation (.inf, .cat, .sys, .cert files). -// For linux this means a vhd file that contains the drivers under /lib/modules/`uname -r` for use -// with depmod and modprobe. -// -// Returns a ResourceCloser for the added mount. On failure, the mounted share will be released, -// the returned ResourceCloser will be nil, and an error will be returned. 
-func InstallKernelDriver(ctx context.Context, vm *uvm.UtilityVM, driver string) (closer resources.ResourceCloser, err error) { - defer func() { - if err != nil && closer != nil { - // best effort clean up allocated resource on failure - if releaseErr := closer.Release(ctx); releaseErr != nil { - log.G(ctx).WithError(releaseErr).Error("failed to release container resource") - } - closer = nil - } - }() - if vm.OS() == "windows" { - options := vm.DefaultVSMBOptions(true) - closer, err = vm.AddVSMB(ctx, driver, options) - if err != nil { - return closer, fmt.Errorf("failed to add VSMB share to utility VM for path %+v: %s", driver, err) - } - uvmPath, err := vm.GetVSMBUvmPath(ctx, driver, true) - if err != nil { - return closer, err - } - return closer, execPnPInstallDriver(ctx, vm, uvmPath) - } - uvmPathForShare := fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, vm.UVMMountCounter()) - scsiCloser, err := vm.AddSCSI(ctx, driver, uvmPathForShare, true, false, []string{}, uvm.VMAccessTypeIndividual) - if err != nil { - return closer, fmt.Errorf("failed to add SCSI disk to utility VM for path %+v: %s", driver, err) - } - return scsiCloser, execModprobeInstallDriver(ctx, vm, uvmPathForShare) -} - -func execModprobeInstallDriver(ctx context.Context, vm *uvm.UtilityVM, driverDir string) error { - p, l, err := cmd.CreateNamedPipeListener() - if err != nil { - return err - } - defer l.Close() - - var stderrOutput string - errChan := make(chan error) - - go readAllPipeOutput(l, errChan, &stderrOutput) - - args := []string{ - "/bin/install-drivers", - driverDir, - } - req := &cmd.CmdProcessRequest{ - Args: args, - Stderr: p, - } - - exitCode, execErr := cmd.ExecInUvm(ctx, vm, req) - - // wait to finish parsing stdout results - select { - case err := <-errChan: - if err != nil { - return errors.Wrap(err, execErr.Error()) - } - case <-ctx.Done(): - return errors.Wrap(ctx.Err(), execErr.Error()) - } - - if execErr != nil && execErr != noExecOutputErr { - return errors.Wrapf(execErr, "failed to install driver %s in uvm with exit code %d: %v", driverDir, exitCode, stderrOutput) - } - - log.G(ctx).WithField("added drivers", driverDir).Debug("installed drivers") - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/pnp.go b/test/vendor/github.com/Microsoft/hcsshim/internal/devices/pnp.go deleted file mode 100644 index 002fd2b9f5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/devices/pnp.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build windows -// +build windows - -package devices - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "strings" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - uvmPnpExePath = "C:\\Windows\\System32\\pnputil.exe" - pnputilNoMoreItemsErrorMessage = `driver not ranked higher than existing driver in UVM. - if drivers were not previously present in the UVM, this - is an expected race and can be ignored.` -) - -var noExecOutputErr = errors.New("failed to get any pipe output") - -// createPnPInstallDriverCommand creates a pnputil command to add and install drivers -// present in `driverUVMPath` and all subdirectories. 
-func createPnPInstallDriverCommand(driverUVMPath string) []string { - dirFormatted := fmt.Sprintf("%s/*.inf", driverUVMPath) - args := []string{ - "cmd", - "/c", - uvmPnpExePath, - "/add-driver", - dirFormatted, - "/subdirs", - "/install", - } - return args -} - -// execPnPInstallDriver makes the calls to exec in the uvm the pnp command -// that installs a driver previously mounted into the uvm. -func execPnPInstallDriver(ctx context.Context, vm *uvm.UtilityVM, driverDir string) error { - args := createPnPInstallDriverCommand(driverDir) - cmdReq := &cmd.CmdProcessRequest{ - Args: args, - } - exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq) - if err != nil && exitCode != winapi.ERROR_NO_MORE_ITEMS { - return errors.Wrapf(err, "failed to install driver %s in uvm with exit code %d", driverDir, exitCode) - } else if exitCode == winapi.ERROR_NO_MORE_ITEMS { - // As mentioned in `pnputilNoMoreItemsErrorMessage`, this exit code comes from pnputil - // but is not necessarily an error - log.G(ctx).WithFields(logrus.Fields{ - logfields.UVMID: vm.ID(), - "driver": driverDir, - "error": pnputilNoMoreItemsErrorMessage, - }).Warn("expected version of driver may not have been installed") - } - - log.G(ctx).WithField("added drivers", driverDir).Debug("installed drivers") - return nil -} - -// readCsPipeOutput is a helper function that connects to a listener and reads -// the connection's comma separated output until done. resulting comma separated -// values are returned in the `result` param. The `errChan` param is used to -// propagate an errors to the calling function. -func readCsPipeOutput(l net.Listener, errChan chan<- error, result *[]string) { - defer close(errChan) - c, err := l.Accept() - if err != nil { - errChan <- errors.Wrapf(err, "failed to accept named pipe") - return - } - bytes, err := ioutil.ReadAll(c) - if err != nil { - errChan <- err - return - } - - elementsAsString := strings.TrimSuffix(string(bytes), "\n") - elements := strings.Split(elementsAsString, ",") - *result = append(*result, elements...) - - if len(*result) == 0 { - errChan <- noExecOutputErr - return - } - - errChan <- nil -} - -// readAllPipeOutput is a helper function that connects to a listener and attempts to -// read the connection's entire output. Resulting output is returned as a string -// in the `result` param. The `errChan` param is used to propagate an errors to -// the calling function. 
-func readAllPipeOutput(l net.Listener, errChan chan<- error, result *string) { - defer close(errChan) - c, err := l.Accept() - if err != nil { - errChan <- errors.Wrapf(err, "failed to accept named pipe") - return - } - bytes, err := ioutil.ReadAll(c) - if err != nil { - errChan <- err - return - } - - *result = string(bytes) - - if len(*result) == 0 { - errChan <- noExecOutputErr - return - } - - errChan <- nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/doc.go deleted file mode 100644 index ca0fdd8c0c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/doc.go +++ /dev/null @@ -1 +0,0 @@ -package extendedtask diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.pb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.pb.go deleted file mode 100644 index c13f92defb..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.pb.go +++ /dev/null @@ -1,550 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto - -package extendedtask - -import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ComputeProcessorInfoRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ComputeProcessorInfoRequest) Reset() { *m = ComputeProcessorInfoRequest{} } -func (*ComputeProcessorInfoRequest) ProtoMessage() {} -func (*ComputeProcessorInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c90988f6b70b2a29, []int{0} -} -func (m *ComputeProcessorInfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ComputeProcessorInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ComputeProcessorInfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ComputeProcessorInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComputeProcessorInfoRequest.Merge(m, src) -} -func (m *ComputeProcessorInfoRequest) XXX_Size() int { - return m.Size() -} -func (m *ComputeProcessorInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ComputeProcessorInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ComputeProcessorInfoRequest proto.InternalMessageInfo - -type ComputeProcessorInfoResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ComputeProcessorInfoResponse) Reset() { *m = ComputeProcessorInfoResponse{} } -func (*ComputeProcessorInfoResponse) ProtoMessage() {} -func (*ComputeProcessorInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c90988f6b70b2a29, []int{1} -} -func (m *ComputeProcessorInfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ComputeProcessorInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ComputeProcessorInfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ComputeProcessorInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComputeProcessorInfoResponse.Merge(m, src) -} -func (m *ComputeProcessorInfoResponse) XXX_Size() int { - return m.Size() -} -func (m *ComputeProcessorInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ComputeProcessorInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ComputeProcessorInfoResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ComputeProcessorInfoRequest)(nil), "ComputeProcessorInfoRequest") - proto.RegisterType((*ComputeProcessorInfoResponse)(nil), "ComputeProcessorInfoResponse") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto", fileDescriptor_c90988f6b70b2a29) -} - -var fileDescriptor_c90988f6b70b2a29 = []byte{ - // 249 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xf7, 0xcd, 0x4c, 0x2e, 0xca, 0x2f, 0xce, 0x4f, 0x2b, - 0xd1, 0xcf, 0x48, 0x2e, 0x2e, 0xce, 0xc8, 0xcc, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, - 0xcc, 0xd1, 0x4f, 
0xad, 0x28, 0x49, 0xcd, 0x4b, 0x49, 0x4d, 0x29, 0x49, 0x2c, 0xce, 0x46, 0xe1, - 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x4b, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x99, 0xfa, 0x20, - 0x16, 0x44, 0x54, 0xc9, 0x94, 0x4b, 0xda, 0x39, 0x3f, 0xb7, 0xa0, 0xb4, 0x24, 0x35, 0xa0, 0x28, - 0x3f, 0x39, 0xb5, 0xb8, 0x38, 0xbf, 0xc8, 0x33, 0x2f, 0x2d, 0x3f, 0x28, 0xb5, 0xb0, 0x34, 0xb5, - 0xb8, 0x44, 0x48, 0x8c, 0x8b, 0x29, 0x33, 0x45, 0x82, 0x51, 0x81, 0x51, 0x83, 0xd3, 0x89, 0xed, - 0xd1, 0x3d, 0x79, 0x26, 0x4f, 0x97, 0x20, 0xa6, 0xcc, 0x14, 0x25, 0x13, 0x2e, 0x19, 0xec, 0xda, - 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0x93, 0xf3, 0x4b, 0xf3, 0x4a, 0xc0, - 0x5a, 0x59, 0x83, 0x20, 0x1c, 0xa3, 0x64, 0x2e, 0x1e, 0x57, 0xa8, 0xc3, 0x42, 0x12, 0x8b, 0xb3, - 0x85, 0x82, 0xb9, 0x44, 0xb0, 0x99, 0x22, 0x24, 0xa3, 0x87, 0xc7, 0x4d, 0x52, 0xb2, 0x7a, 0xf8, - 0xac, 0x76, 0x8a, 0x3c, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, - 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x3d, 0xe9, - 0x01, 0x69, 0x8d, 0xcc, 0x89, 0x60, 0x48, 0x62, 0x03, 0x87, 0x9a, 0x31, 0x20, 0x00, 0x00, 0xff, - 0xff, 0x1d, 0x33, 0xbb, 0x33, 0x94, 0x01, 0x00, 0x00, -} - -func (m *ComputeProcessorInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ComputeProcessorInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ComputeProcessorInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintExtendedtask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ComputeProcessorInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ComputeProcessorInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ComputeProcessorInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Count != 0 { - i = encodeVarintExtendedtask(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintExtendedtask(dAtA []byte, offset int, v uint64) int { - offset -= sovExtendedtask(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ComputeProcessorInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovExtendedtask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ComputeProcessorInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != 0 { - n += 1 + sovExtendedtask(uint64(m.Count)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - 
-func sovExtendedtask(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozExtendedtask(x uint64) (n int) { - return sovExtendedtask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ComputeProcessorInfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComputeProcessorInfoRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ComputeProcessorInfoResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComputeProcessorInfoResponse{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringExtendedtask(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type ExtendedTaskService interface { - ComputeProcessorInfo(ctx context.Context, req *ComputeProcessorInfoRequest) (*ComputeProcessorInfoResponse, error) -} - -func RegisterExtendedTaskService(srv *github_com_containerd_ttrpc.Server, svc ExtendedTaskService) { - srv.Register("ExtendedTask", map[string]github_com_containerd_ttrpc.Method{ - "ComputeProcessorInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ComputeProcessorInfoRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ComputeProcessorInfo(ctx, &req) - }, - }) -} - -type extendedTaskClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewExtendedTaskClient(client *github_com_containerd_ttrpc.Client) ExtendedTaskService { - return &extendedTaskClient{ - client: client, - } -} - -func (c *extendedTaskClient) ComputeProcessorInfo(ctx context.Context, req *ComputeProcessorInfoRequest) (*ComputeProcessorInfoResponse, error) { - var resp ComputeProcessorInfoResponse - if err := c.client.Call(ctx, "ExtendedTask", "ComputeProcessorInfo", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *ComputeProcessorInfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComputeProcessorInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComputeProcessorInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthExtendedtask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthExtendedtask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipExtendedtask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthExtendedtask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComputeProcessorInfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComputeProcessorInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComputeProcessorInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipExtendedtask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthExtendedtask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipExtendedtask(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedtask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthExtendedtask - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupExtendedtask - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthExtendedtask - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthExtendedtask = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowExtendedtask = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupExtendedtask = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto b/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto deleted file mode 100644 index eaa1fff7a6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/extendedtask/extendedtask.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/Microsoft/hcsshim/internal/extendedtask;extendedtask"; - -import weak "gogoproto/gogo.proto"; - -service ExtendedTask { - rpc ComputeProcessorInfo(ComputeProcessorInfoRequest) returns (ComputeProcessorInfoResponse); -} - -message ComputeProcessorInfoRequest { - string id = 1; -} - -message ComputeProcessorInfoResponse { - int32 count = 1; -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/bridge.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/bridge.go deleted file mode 100644 index 18d293adf5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/bridge.go +++ /dev/null @@ -1,458 +0,0 @@ -//go:build windows - -package gcs - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "os" - "sync" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/internal/log" -) - -const ( - hdrSize = 16 - hdrOffType = 0 - hdrOffSize = 4 - hdrOffID = 8 - - // maxMsgSize is the maximum size of an incoming message. This is not - // enforced by the guest today but some maximum must be set to avoid - // unbounded allocations. 
- maxMsgSize = 0x10000 -) - -type requestMessage interface { - Base() *requestBase -} - -type responseMessage interface { - Base() *responseBase -} - -// rpc represents an outstanding rpc request to the guest -type rpc struct { - proc rpcProc - id int64 - req requestMessage - resp responseMessage - brdgErr error // error encountered when sending the request or unmarshaling the result - ch chan struct{} -} - -// bridge represents a communcations bridge with the guest. It handles the -// transport layer but (mostly) does not parse or construct the message payload. -type bridge struct { - // Timeout is the time a synchronous RPC must respond within. - Timeout time.Duration - - mu sync.Mutex - nextID int64 - rpcs map[int64]*rpc - conn io.ReadWriteCloser - rpcCh chan *rpc - notify notifyFunc - closed bool - log *logrus.Entry - brdgErr error - waitCh chan struct{} -} - -var ( - errBridgeClosed = errors.New("bridge closed") -) - -const ( - // bridgeFailureTimeout is the default value for bridge.Timeout - bridgeFailureTimeout = time.Minute * 5 -) - -type notifyFunc func(*containerNotification) error - -// newBridge returns a bridge on `conn`. It calls `notify` when a -// notification message arrives from the guest. It logs transport errors and -// traces using `log`. -func newBridge(conn io.ReadWriteCloser, notify notifyFunc, log *logrus.Entry) *bridge { - return &bridge{ - conn: conn, - rpcs: make(map[int64]*rpc), - rpcCh: make(chan *rpc), - waitCh: make(chan struct{}), - notify: notify, - log: log, - Timeout: bridgeFailureTimeout, - } -} - -// Start begins the bridge send and receive goroutines. -func (brdg *bridge) Start() { - go brdg.recvLoopRoutine() - go brdg.sendLoop() -} - -// kill terminates the bridge, closing the connection and causing all new and -// existing RPCs to fail. -func (brdg *bridge) kill(err error) { - brdg.mu.Lock() - if brdg.closed { - brdg.mu.Unlock() - if err != nil { - brdg.log.WithError(err).Warn("bridge error, already terminated") - } - return - } - brdg.closed = true - brdg.mu.Unlock() - brdg.brdgErr = err - if err != nil { - brdg.log.WithError(err).Error("bridge forcibly terminating") - } else { - brdg.log.Debug("bridge terminating") - } - brdg.conn.Close() - close(brdg.waitCh) -} - -// Close closes the bridge. Calling RPC or AsyncRPC after calling Close will -// panic. -func (brdg *bridge) Close() error { - brdg.kill(nil) - return brdg.brdgErr -} - -// Wait waits for the bridge connection to terminate and returns the bridge -// error, if any. -func (brdg *bridge) Wait() error { - <-brdg.waitCh - return brdg.brdgErr -} - -// AsyncRPC sends an RPC request to the guest but does not wait for a response. -// If the message cannot be sent before the context is done, then an error is -// returned. -func (brdg *bridge) AsyncRPC(ctx context.Context, proc rpcProc, req requestMessage, resp responseMessage) (*rpc, error) { - call := &rpc{ - ch: make(chan struct{}), - proc: proc, - req: req, - resp: resp, - } - if err := ctx.Err(); err != nil { - return nil, err - } - // Send the request. 
- select { - case brdg.rpcCh <- call: - return call, nil - case <-brdg.waitCh: - err := brdg.brdgErr - if err == nil { - err = errBridgeClosed - } - return nil, err - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -func (call *rpc) complete(err error) { - call.brdgErr = err - close(call.ch) -} - -type rpcError struct { - result int32 - message string -} - -func (err *rpcError) Error() string { - msg := err.message - if msg == "" { - msg = windows.Errno(err.result).Error() - } - return "guest RPC failure: " + msg -} - -// IsNotExist is a helper function to determine if the inner rpc error is Not Exist -func IsNotExist(err error) bool { - switch rerr := err.(type) { - case *rpcError: - return uint32(rerr.result) == hrComputeSystemDoesNotExist - } - return false -} - -// Err returns the RPC's result. This may be a transport error or an error from -// the message response. -func (call *rpc) Err() error { - if call.brdgErr != nil { - return call.brdgErr - } - resp := call.resp.Base() - if resp.Result == 0 { - return nil - } - return &rpcError{result: resp.Result, message: resp.ErrorMessage} -} - -// Done returns whether the RPC has completed. -func (call *rpc) Done() bool { - select { - case <-call.ch: - return true - default: - return false - } -} - -// Wait waits for the RPC to complete. -func (call *rpc) Wait() { - <-call.ch -} - -// RPC issues a synchronous RPC request. Returns immediately if the context -// becomes done and the message is not sent. -// -// If allowCancel is set and the context becomes done, returns an error without -// waiting for a response. Avoid this on messages that are not idempotent or -// otherwise safe to ignore the response of. -func (brdg *bridge) RPC(ctx context.Context, proc rpcProc, req requestMessage, resp responseMessage, allowCancel bool) error { - call, err := brdg.AsyncRPC(ctx, proc, req, resp) - if err != nil { - return err - } - var ctxDone <-chan struct{} - if allowCancel { - // This message can be safely cancelled by ignoring the response. - ctxDone = ctx.Done() - } - t := time.NewTimer(brdg.Timeout) - defer t.Stop() - select { - case <-call.ch: - return call.Err() - case <-ctxDone: - brdg.log.WithField("reason", ctx.Err()).Warn("ignoring response to bridge message") - return ctx.Err() - case <-t.C: - brdg.kill(errors.New("message timeout")) - <-call.ch - return call.Err() - } -} - -func (brdg *bridge) recvLoopRoutine() { - brdg.kill(brdg.recvLoop()) - // Fail any remaining RPCs. 
- brdg.mu.Lock() - rpcs := brdg.rpcs - brdg.rpcs = nil - brdg.mu.Unlock() - for _, call := range rpcs { - call.complete(errBridgeClosed) - } -} - -func readMessage(r io.Reader) (int64, msgType, []byte, error) { - var h [hdrSize]byte - _, err := io.ReadFull(r, h[:]) - if err != nil { - return 0, 0, nil, err - } - typ := msgType(binary.LittleEndian.Uint32(h[hdrOffType:])) - n := binary.LittleEndian.Uint32(h[hdrOffSize:]) - id := int64(binary.LittleEndian.Uint64(h[hdrOffID:])) - if n < hdrSize || n > maxMsgSize { - return 0, 0, nil, fmt.Errorf("invalid message size %d", n) - } - n -= hdrSize - b := make([]byte, n) - _, err = io.ReadFull(r, b) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, 0, nil, err - } - return id, typ, b, nil -} - -func isLocalDisconnectError(err error) bool { - if o, ok := err.(*net.OpError); ok { - if s, ok := o.Err.(*os.SyscallError); ok { - return s.Err == syscall.WSAECONNABORTED - } - } - return false -} - -func (brdg *bridge) recvLoop() error { - br := bufio.NewReader(brdg.conn) - for { - id, typ, b, err := readMessage(br) - if err != nil { - if err == io.EOF || isLocalDisconnectError(err) { - return nil - } - return fmt.Errorf("bridge read failed: %s", err) - } - brdg.log.WithFields(logrus.Fields{ - "payload": string(b), - "type": typ.String(), - "message-id": id}).Debug("bridge receive") - switch typ & msgTypeMask { - case msgTypeResponse: - // Find the request associated with this response. - brdg.mu.Lock() - call := brdg.rpcs[id] - delete(brdg.rpcs, id) - brdg.mu.Unlock() - if call == nil { - return fmt.Errorf("bridge received unknown rpc response for id %d, type %s", id, typ) - } - err := json.Unmarshal(b, call.resp) - if err != nil { - err = fmt.Errorf("bridge response unmarshal failed: %s", err) - } else if resp := call.resp.Base(); resp.Result != 0 { - for _, rec := range resp.ErrorRecords { - brdg.log.WithFields(logrus.Fields{ - "message-id": id, - "result": rec.Result, - "result-message": windows.Errno(rec.Result).Error(), - "error-message": rec.Message, - "stack": rec.StackTrace, - "module": rec.ModuleName, - "file": rec.FileName, - "line": rec.Line, - "function": rec.FunctionName, - }).Error("bridge RPC error record") - } - } - call.complete(err) - if err != nil { - return err - } - - case msgTypeNotify: - if typ != notifyContainer|msgTypeNotify { - return fmt.Errorf("bridge received unknown unknown notification message %s", typ) - } - var ntf containerNotification - ntf.ResultInfo.Value = &json.RawMessage{} - err := json.Unmarshal(b, &ntf) - if err != nil { - return fmt.Errorf("bridge response unmarshal failed: %s", err) - } - err = brdg.notify(&ntf) - if err != nil { - return fmt.Errorf("bridge notification failed: %s", err) - } - default: - return fmt.Errorf("bridge received unknown unknown message type %s", typ) - } - } -} - -func (brdg *bridge) sendLoop() { - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - enc.SetEscapeHTML(false) - for { - select { - case <-brdg.waitCh: - // The bridge has been killed. - return - case call := <-brdg.rpcCh: - err := brdg.sendRPC(&buf, enc, call) - if err != nil { - brdg.kill(err) - return - } - } - } -} - -func (brdg *bridge) writeMessage(buf *bytes.Buffer, enc *json.Encoder, typ msgType, id int64, req interface{}) error { - // Prepare the buffer with the message. 
- var h [hdrSize]byte - binary.LittleEndian.PutUint32(h[hdrOffType:], uint32(typ)) - binary.LittleEndian.PutUint64(h[hdrOffID:], uint64(id)) - buf.Write(h[:]) - err := enc.Encode(req) - if err != nil { - return fmt.Errorf("bridge encode: %s", err) - } - // Update the message header with the size. - binary.LittleEndian.PutUint32(buf.Bytes()[hdrOffSize:], uint32(buf.Len())) - - if brdg.log.Logger.GetLevel() >= logrus.DebugLevel { - b := buf.Bytes()[hdrSize:] - switch typ { - // container environment vars are in rpCreate for linux; rpcExecuteProcess for windows - case msgType(rpcCreate) | msgTypeRequest: - b, err = log.ScrubBridgeCreate(b) - case msgType(rpcExecuteProcess) | msgTypeRequest: - b, err = log.ScrubBridgeExecProcess(b) - } - if err != nil { - brdg.log.WithError(err).Warning("could not scrub bridge payload") - } - brdg.log.WithFields(logrus.Fields{ - "payload": string(b), - "type": typ.String(), - "message-id": id}).Debug("bridge send") - } - - // Write the message. - _, err = buf.WriteTo(brdg.conn) - if err != nil { - return fmt.Errorf("bridge write: %s", err) - } - return nil -} - -func (brdg *bridge) sendRPC(buf *bytes.Buffer, enc *json.Encoder, call *rpc) error { - // Prepare the message for the response. - brdg.mu.Lock() - if brdg.rpcs == nil { - brdg.mu.Unlock() - call.complete(errBridgeClosed) - return nil - } - id := brdg.nextID - call.id = id - brdg.rpcs[id] = call - brdg.nextID++ - brdg.mu.Unlock() - typ := msgType(call.proc) | msgTypeRequest - err := brdg.writeMessage(buf, enc, typ, id, call.req) - if err != nil { - // Try to reclaim this request and fail it. - brdg.mu.Lock() - if brdg.rpcs[id] == nil { - call = nil - } - delete(brdg.rpcs, id) - brdg.mu.Unlock() - if call != nil { - call.complete(err) - } else { - brdg.log.WithError(err).Error("bridge write failed but call is already complete") - } - return err - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/container.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/container.go deleted file mode 100644 index 84889a8608..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/container.go +++ /dev/null @@ -1,246 +0,0 @@ -//go:build windows - -package gcs - -import ( - "context" - "errors" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -const hrComputeSystemDoesNotExist = 0xc037010e - -// Container implements the cow.Container interface for containers -// created via GuestConnection. -type Container struct { - gc *GuestConnection - id string - notifyCh chan struct{} - closeCh chan struct{} - closeOnce sync.Once -} - -var _ cow.Container = &Container{} - -// CreateContainer creates a container using ID `cid` and `cfg`. The request -// will likely not be cancellable even if `ctx` becomes done. 
-func (gc *GuestConnection) CreateContainer(ctx context.Context, cid string, config interface{}) (_ *Container, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnection::CreateContainer", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", cid)) - - c := &Container{ - gc: gc, - id: cid, - notifyCh: make(chan struct{}), - closeCh: make(chan struct{}), - } - err = gc.requestNotify(cid, c.notifyCh) - if err != nil { - return nil, err - } - req := containerCreate{ - requestBase: makeRequest(ctx, cid), - ContainerConfig: anyInString{config}, - } - var resp containerCreateResponse - err = gc.brdg.RPC(ctx, rpcCreate, &req, &resp, false) - if err != nil { - return nil, err - } - go c.waitBackground() - return c, nil -} - -// CloneContainer just creates the wrappers and sets up notification requests for a -// container that is already running inside the UVM (after cloning). -func (gc *GuestConnection) CloneContainer(ctx context.Context, cid string) (_ *Container, err error) { - c := &Container{ - gc: gc, - id: cid, - notifyCh: make(chan struct{}), - closeCh: make(chan struct{}), - } - err = gc.requestNotify(cid, c.notifyCh) - if err != nil { - return nil, err - } - go c.waitBackground() - return c, nil -} - -// OS returns the operating system of the container, "linux" or "windows". -func (c *Container) OS() string { - return c.gc.os -} - -// IsOCI specifies whether CreateProcess should be called with an OCI -// specification in its input. -func (c *Container) IsOCI() bool { - return c.gc.os != "windows" -} - -// Close releases associated with the container. -func (c *Container) Close() error { - c.closeOnce.Do(func() { - _, span := oc.StartSpan(context.Background(), "gcs::Container::Close") - defer span.End() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - }) - return nil -} - -// CreateProcess creates a process in the container. -func (c *Container) CreateProcess(ctx context.Context, config interface{}) (_ cow.Process, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::CreateProcess", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - return c.gc.exec(ctx, c.id, config) -} - -// ID returns the container's ID. -func (c *Container) ID() string { - return c.id -} - -// Modify sends a modify request to the container. -func (c *Container) Modify(ctx context.Context, config interface{}) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::Modify", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - req := containerModifySettings{ - requestBase: makeRequest(ctx, c.id), - Request: config, - } - var resp responseBase - return c.gc.brdg.RPC(ctx, rpcModifySettings, &req, &resp, false) -} - -// Properties returns the requested container properties targeting a V1 schema container. 
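// The methods above and below all follow the same instrumentation idiom:
// start a span, record the container ID as an attribute, and use a named
// error return so a deferred call can record the outcome. A minimal sketch
// using OpenCensus directly; the package's oc helpers are assumed here to be
// thin wrappers over these calls.
package gcssketch

import (
	"context"

	"go.opencensus.io/trace"
)

func instrumented(ctx context.Context, cid string, op func(context.Context) error) (err error) {
	ctx, span := trace.StartSpan(ctx, "gcs::sketch::Instrumented")
	defer span.End()
	defer func() {
		// Runs after op has assigned the named return value.
		if err != nil {
			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
		}
	}()
	span.AddAttributes(trace.StringAttribute("cid", cid))

	return op(ctx)
}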
-func (c *Container) Properties(ctx context.Context, types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::Properties", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - req := containerGetProperties{ - requestBase: makeRequest(ctx, c.id), - Query: containerPropertiesQuery{PropertyTypes: types}, - } - var resp containerGetPropertiesResponse - err = c.gc.brdg.RPC(ctx, rpcGetProperties, &req, &resp, true) - if err != nil { - return nil, err - } - return (*schema1.ContainerProperties)(&resp.Properties), nil -} - -// PropertiesV2 returns the requested container properties targeting a V2 schema container. -func (c *Container) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::PropertiesV2", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - req := containerGetPropertiesV2{ - requestBase: makeRequest(ctx, c.id), - Query: containerPropertiesQueryV2{PropertyTypes: types}, - } - var resp containerGetPropertiesResponseV2 - err = c.gc.brdg.RPC(ctx, rpcGetProperties, &req, &resp, true) - if err != nil { - return nil, err - } - return (*hcsschema.Properties)(&resp.Properties), nil -} - -// Start starts the container. -func (c *Container) Start(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::Start", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - req := makeRequest(ctx, c.id) - var resp responseBase - return c.gc.brdg.RPC(ctx, rpcStart, &req, &resp, false) -} - -func (c *Container) shutdown(ctx context.Context, proc rpcProc) error { - req := makeRequest(ctx, c.id) - var resp responseBase - err := c.gc.brdg.RPC(ctx, proc, &req, &resp, true) - if err != nil { - if uint32(resp.Result) != hrComputeSystemDoesNotExist { - return err - } - select { - case <-c.notifyCh: - default: - log.G(ctx).WithError(err).Warn("ignoring missing container") - } - } - return nil -} - -// Shutdown sends a graceful shutdown request to the container. The container -// might not be terminated by the time the request completes (and might never -// terminate). -func (c *Container) Shutdown(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::Shutdown", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - return c.shutdown(ctx, rpcShutdownGraceful) -} - -// Terminate sends a forceful terminate request to the container. The container -// might not be terminated by the time the request completes (and might never -// terminate). 
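// A hedged usage sketch of the Container lifecycle exposed above: create the
// container over an existing guest connection, start it, and request a
// graceful shutdown. The config value and error handling are illustrative
// only, and the internal/gcs import is only reachable from within the hcsshim
// module (as the /test packages are).
package gcssketch

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/gcs"
)

func runContainer(ctx context.Context, gc *gcs.GuestConnection, cid string, config interface{}) error {
	c, err := gc.CreateContainer(ctx, cid, config)
	if err != nil {
		return err
	}
	defer c.Close()

	if err := c.Start(ctx); err != nil {
		return err
	}
	// Shutdown is a request, not a guarantee: per the doc comments above, the
	// container may still be running when this returns.
	return c.Shutdown(ctx)
}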
-func (c *Container) Terminate(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Container::Terminate", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - return c.shutdown(ctx, rpcShutdownForced) -} - -// Wait waits for the container to terminate (or Close to be called, or the -// guest connection to terminate). -func (c *Container) Wait() error { - select { - case <-c.notifyCh: - return nil - case <-c.closeCh: - return errors.New("container closed") - } -} - -func (c *Container) waitBackground() { - ctx, span := oc.StartSpan(context.Background(), "gcs::Container::waitBackground") - defer span.End() - span.AddAttributes(trace.StringAttribute("cid", c.id)) - - err := c.Wait() - log.G(ctx).Debug("container exited") - oc.SetSpanStatus(span, err) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/doc.go deleted file mode 100644 index 260915232f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/doc.go +++ /dev/null @@ -1 +0,0 @@ -package gcs diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/guestconnection.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/guestconnection.go deleted file mode 100644 index a90fba15e9..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/guestconnection.go +++ /dev/null @@ -1,310 +0,0 @@ -//go:build windows - -package gcs - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -const ( - protocolVersion = 4 - - firstIoChannelVsockPort = LinuxGcsVsockPort + 1 - nullContainerID = "00000000-0000-0000-0000-000000000000" -) - -// IoListenFunc is a type for a function that creates a listener for a VM for -// the vsock port `port`. -type IoListenFunc func(port uint32) (net.Listener, error) - -// HvsockIoListen returns an implementation of IoListenFunc that listens -// on the specified vsock port for the VM specified by `vmID`. -func HvsockIoListen(vmID guid.GUID) IoListenFunc { - return func(port uint32) (net.Listener, error) { - return winio.ListenHvsock(&winio.HvsockAddr{ - VMID: vmID, - ServiceID: winio.VsockServiceID(port), - }) - } -} - -type InitialGuestState struct { - // Timezone is only honored for Windows guests. - Timezone *hcsschema.TimeZoneInformation -} - -// GuestConnectionConfig contains options for creating a guest connection. -type GuestConnectionConfig struct { - // Conn specifies the connection to use for the bridge. It will be closed - // when there is an error or Close is called. - Conn io.ReadWriteCloser - // Log specifies the logrus entry to use for async log messages. - Log *logrus.Entry - // IoListen is the function to use to create listeners for the stdio connections. - IoListen IoListenFunc - // InitGuestState specifies settings to apply to the guest on creation/start. 
This includes things such as the timezone for the VM. - InitGuestState *InitialGuestState -} - -// Connect establishes a GCS connection. `gcc.Conn` will be closed by this function. -func (gcc *GuestConnectionConfig) Connect(ctx context.Context, isColdStart bool) (_ *GuestConnection, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnectionConfig::Connect", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - gc := &GuestConnection{ - nextPort: firstIoChannelVsockPort, - notifyChs: make(map[string]chan struct{}), - ioListenFn: gcc.IoListen, - } - gc.brdg = newBridge(gcc.Conn, gc.notify, gcc.Log) - gc.brdg.Start() - go func() { - _ = gc.brdg.Wait() - gc.clearNotifies() - }() - err = gc.connect(ctx, isColdStart, gcc.InitGuestState) - if err != nil { - gc.Close() - return nil, err - } - return gc, nil -} - -// GuestConnection represents a connection to the GCS. -type GuestConnection struct { - brdg *bridge - ioListenFn IoListenFunc - mu sync.Mutex - nextPort uint32 - notifyChs map[string]chan struct{} - caps schema1.GuestDefinedCapabilities - os string -} - -var _ cow.ProcessHost = &GuestConnection{} - -// Capabilities returns the guest's declared capabilities. -func (gc *GuestConnection) Capabilities() *schema1.GuestDefinedCapabilities { - return &gc.caps -} - -// Protocol returns the protocol version that is in use. -func (gc *GuestConnection) Protocol() uint32 { - return protocolVersion -} - -// connect establishes a GCS connection. It must not be called more than once. -// isColdStart should be true when the UVM is being connected to for the first time post-boot. -// It should be false for subsequent connections (e.g. when connecting to a UVM that has -// been cloned). -func (gc *GuestConnection) connect(ctx context.Context, isColdStart bool, initGuestState *InitialGuestState) (err error) { - req := negotiateProtocolRequest{ - MinimumVersion: protocolVersion, - MaximumVersion: protocolVersion, - } - var resp negotiateProtocolResponse - resp.Capabilities.GuestDefinedCapabilities = &gc.caps - err = gc.brdg.RPC(ctx, rpcNegotiateProtocol, &req, &resp, true) - if err != nil { - return err - } - if resp.Version != protocolVersion { - return fmt.Errorf("unexpected version %d returned", resp.Version) - } - gc.os = strings.ToLower(resp.Capabilities.RuntimeOsType) - if gc.os == "" { - gc.os = "windows" - } - if isColdStart && resp.Capabilities.SendHostCreateMessage { - conf := &uvmConfig{ - SystemType: "Container", - } - if initGuestState != nil && initGuestState.Timezone != nil { - conf.TimeZoneInformation = initGuestState.Timezone - } - createReq := containerCreate{ - requestBase: makeRequest(ctx, nullContainerID), - ContainerConfig: anyInString{conf}, - } - var createResp responseBase - err = gc.brdg.RPC(ctx, rpcCreate, &createReq, &createResp, true) - if err != nil { - return err - } - if resp.Capabilities.SendHostStartMessage { - startReq := makeRequest(ctx, nullContainerID) - var startResp responseBase - err = gc.brdg.RPC(ctx, rpcStart, &startReq, &startResp, true) - if err != nil { - return err - } - } - } - return nil -} - -// Modify sends a modify settings request to the null container. This is -// generally used to prepare virtual hardware that has been added to the guest. 
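// A sketch of wiring up a GuestConnection as the code above expects: the
// caller supplies an already-established bridge transport plus an IoListen
// function for stdio channels, then calls Connect, which negotiates the
// protocol version and, on a cold start, creates/starts the host "null
// container". The conn and vmID values here are placeholders.
package gcssketch

import (
	"context"
	"io"

	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/gcs"
	"github.com/sirupsen/logrus"
)

func connectToGuest(ctx context.Context, conn io.ReadWriteCloser, vmID guid.GUID) (*gcs.GuestConnection, error) {
	gcc := &gcs.GuestConnectionConfig{
		Conn:     conn, // e.g. an accepted hvsock connection to the GCS
		Log:      logrus.NewEntry(logrus.StandardLogger()),
		IoListen: gcs.HvsockIoListen(vmID), // stdio relays listen on per-process vsock ports
	}
	// true = cold start: the UVM has just booted and has no prior GCS state.
	return gcc.Connect(ctx, true)
}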
-func (gc *GuestConnection) Modify(ctx context.Context, settings interface{}) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnection::Modify", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - req := containerModifySettings{ - requestBase: makeRequest(ctx, nullContainerID), - Request: settings, - } - var resp responseBase - return gc.brdg.RPC(ctx, rpcModifySettings, &req, &resp, false) -} - -func (gc *GuestConnection) DumpStacks(ctx context.Context) (response string, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnection::DumpStacks", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - req := dumpStacksRequest{ - requestBase: makeRequest(ctx, nullContainerID), - } - var resp dumpStacksResponse - err = gc.brdg.RPC(ctx, rpcDumpStacks, &req, &resp, false) - return resp.GuestStacks, err -} - -func (gc *GuestConnection) DeleteContainerState(ctx context.Context, cid string) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnection::DeleteContainerState", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", cid)) - - req := deleteContainerStateRequest{ - requestBase: makeRequest(ctx, cid), - } - var resp responseBase - return gc.brdg.RPC(ctx, rpcDeleteContainerState, &req, &resp, false) -} - -// Close terminates the guest connection. It is undefined to call any other -// methods on the connection after this is called. -func (gc *GuestConnection) Close() error { - if gc.brdg == nil { - return nil - } - return gc.brdg.Close() -} - -// CreateProcess creates a process in the container host. -func (gc *GuestConnection) CreateProcess(ctx context.Context, settings interface{}) (_ cow.Process, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::GuestConnection::CreateProcess", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - return gc.exec(ctx, nullContainerID, settings) -} - -// OS returns the operating system of the container's host, "windows" or "linux". -func (gc *GuestConnection) OS() string { - return gc.os -} - -// IsOCI returns false, indicating that CreateProcess should not be called with -// an OCI process spec. 
-func (gc *GuestConnection) IsOCI() bool { - return false -} - -func (gc *GuestConnection) newIoChannel() (*ioChannel, uint32, error) { - gc.mu.Lock() - port := gc.nextPort - gc.nextPort++ - gc.mu.Unlock() - l, err := gc.ioListenFn(port) - if err != nil { - return nil, 0, err - } - return newIoChannel(l), port, nil -} - -func (gc *GuestConnection) requestNotify(cid string, ch chan struct{}) error { - gc.mu.Lock() - defer gc.mu.Unlock() - if gc.notifyChs == nil { - return errors.New("guest connection closed") - } - if _, ok := gc.notifyChs[cid]; ok { - return fmt.Errorf("container %s already exists", cid) - } - gc.notifyChs[cid] = ch - return nil -} - -func (gc *GuestConnection) notify(ntf *containerNotification) error { - cid := ntf.ContainerID - gc.mu.Lock() - ch := gc.notifyChs[cid] - delete(gc.notifyChs, cid) - gc.mu.Unlock() - if ch == nil { - return fmt.Errorf("container %s not found", cid) - } - logrus.WithField(logfields.ContainerID, cid).Info("container terminated in guest") - close(ch) - return nil -} - -func (gc *GuestConnection) clearNotifies() { - gc.mu.Lock() - chs := gc.notifyChs - gc.notifyChs = nil - gc.mu.Unlock() - for _, ch := range chs { - close(ch) - } -} - -func makeRequest(ctx context.Context, cid string) requestBase { - r := requestBase{ - ContainerID: cid, - } - span := trace.FromContext(ctx) - if span != nil { - sc := span.SpanContext() - r.OpenCensusSpanContext = &ocspancontext{ - TraceID: hex.EncodeToString(sc.TraceID[:]), - SpanID: hex.EncodeToString(sc.SpanID[:]), - TraceOptions: uint32(sc.TraceOptions), - } - if sc.Tracestate != nil { - entries := sc.Tracestate.Entries() - if len(entries) > 0 { - if bytes, err := json.Marshal(sc.Tracestate.Entries()); err == nil { - r.OpenCensusSpanContext.Tracestate = base64.StdEncoding.EncodeToString(bytes) - } else { - log.G(ctx).WithError(err).Warn("failed to encode OpenCensus Tracestate") - } - } - } - } - return r -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/process.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/process.go deleted file mode 100644 index fab6af75c7..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/process.go +++ /dev/null @@ -1,293 +0,0 @@ -//go:build windows - -package gcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "sync" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -const ( - hrNotFound = 0x80070490 -) - -// Process represents a process in a container or container host. 
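// requestNotify, notify and clearNotifies above implement a small
// close-on-event registry: each container gets a channel that is closed
// exactly once, either when the guest reports the container's exit or when
// the whole connection is torn down. A standalone sketch of that pattern:
package gcssketch

import (
	"fmt"
	"sync"
)

type notifier struct {
	mu  sync.Mutex
	chs map[string]chan struct{} // nil once the connection is closed
}

func newNotifier() *notifier {
	return &notifier{chs: make(map[string]chan struct{})}
}

func (n *notifier) register(id string) (<-chan struct{}, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.chs == nil {
		return nil, fmt.Errorf("connection closed")
	}
	if _, ok := n.chs[id]; ok {
		return nil, fmt.Errorf("%s already registered", id)
	}
	ch := make(chan struct{})
	n.chs[id] = ch
	return ch, nil
}

// signal closes the channel for one id (a container exit notification).
func (n *notifier) signal(id string) {
	n.mu.Lock()
	ch := n.chs[id]
	delete(n.chs, id)
	n.mu.Unlock()
	if ch != nil {
		close(ch)
	}
}

// closeAll releases every waiter because the connection itself has gone away.
func (n *notifier) closeAll() {
	n.mu.Lock()
	chs := n.chs
	n.chs = nil
	n.mu.Unlock()
	for _, ch := range chs {
		close(ch)
	}
}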
-type Process struct { - gc *GuestConnection - cid string - id uint32 - waitCall *rpc - waitResp containerWaitForProcessResponse - stdin, stdout, stderr *ioChannel - stdinCloseWriteOnce sync.Once - stdinCloseWriteErr error -} - -var _ cow.Process = &Process{} - -type baseProcessParams struct { - CreateStdInPipe, CreateStdOutPipe, CreateStdErrPipe bool -} - -func (gc *GuestConnection) exec(ctx context.Context, cid string, params interface{}) (_ cow.Process, err error) { - b, err := json.Marshal(params) - if err != nil { - return nil, err - } - var bp baseProcessParams - err = json.Unmarshal(b, &bp) - if err != nil { - return nil, err - } - - req := containerExecuteProcess{ - requestBase: makeRequest(ctx, cid), - Settings: executeProcessSettings{ - ProcessParameters: anyInString{params}, - }, - } - - p := &Process{gc: gc, cid: cid} - defer func() { - if err != nil { - p.Close() - } - }() - - // Construct the stdio channels. Windows guests expect hvsock service IDs - // instead of vsock ports. - var hvsockSettings executeProcessStdioRelaySettings - var vsockSettings executeProcessVsockStdioRelaySettings - if gc.os == "windows" { - req.Settings.StdioRelaySettings = &hvsockSettings - } else { - req.Settings.VsockStdioRelaySettings = &vsockSettings - } - if bp.CreateStdInPipe { - p.stdin, vsockSettings.StdIn, err = gc.newIoChannel() - if err != nil { - return nil, err - } - g := winio.VsockServiceID(vsockSettings.StdIn) - hvsockSettings.StdIn = &g - } - if bp.CreateStdOutPipe { - p.stdout, vsockSettings.StdOut, err = gc.newIoChannel() - if err != nil { - return nil, err - } - g := winio.VsockServiceID(vsockSettings.StdOut) - hvsockSettings.StdOut = &g - } - if bp.CreateStdErrPipe { - p.stderr, vsockSettings.StdErr, err = gc.newIoChannel() - if err != nil { - return nil, err - } - g := winio.VsockServiceID(vsockSettings.StdErr) - hvsockSettings.StdErr = &g - } - - var resp containerExecuteProcessResponse - err = gc.brdg.RPC(ctx, rpcExecuteProcess, &req, &resp, false) - if err != nil { - return nil, err - } - p.id = resp.ProcessID - log.G(ctx).WithField("pid", p.id).Debug("created process pid") - // Start a wait message. - waitReq := containerWaitForProcess{ - requestBase: makeRequest(ctx, cid), - ProcessID: p.id, - TimeoutInMs: 0xffffffff, - } - p.waitCall, err = gc.brdg.AsyncRPC(ctx, rpcWaitForProcess, &waitReq, &p.waitResp) - if err != nil { - return nil, fmt.Errorf("failed to wait on process, leaking process: %s", err) - } - go p.waitBackground() - return p, nil -} - -// Close releases resources associated with the process and closes the -// associated standard IO streams. -func (p *Process) Close() error { - ctx, span := oc.StartSpan(context.Background(), "gcs::Process::Close") - defer span.End() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - if err := p.stdin.Close(); err != nil { - log.G(ctx).WithError(err).Warn("close stdin failed") - } - if err := p.stdout.Close(); err != nil { - log.G(ctx).WithError(err).Warn("close stdout failed") - } - if err := p.stderr.Close(); err != nil { - log.G(ctx).WithError(err).Warn("close stderr failed") - } - return nil -} - -// CloseStdin causes the process to read EOF on its stdin stream. 
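// exec above allocates one vsock port per stdio stream and then, for Windows
// guests only, rewrites each port as an hvsock service ID GUID, while Linux
// guests take the raw port numbers. A small sketch of that translation using
// the same go-winio helper:
package gcssketch

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func stdioRelayFor(osType string, port uint32) string {
	if osType == "windows" {
		// hvsock service IDs are GUIDs deterministically derived from the port.
		return winio.VsockServiceID(port).String()
	}
	return fmt.Sprintf("vsock port %d", port)
}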
-func (p *Process) CloseStdin(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::CloseStdin") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - p.stdinCloseWriteOnce.Do(func() { - p.stdinCloseWriteErr = p.stdin.CloseWrite() - }) - return p.stdinCloseWriteErr -} - -func (p *Process) CloseStdout(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::CloseStdout") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - return p.stdout.Close() -} - -func (p *Process) CloseStderr(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::CloseStderr") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - return p.stderr.Close() -} - -// ExitCode returns the process's exit code, or an error if the process is still -// running or the exit code is otherwise unknown. -func (p *Process) ExitCode() (_ int, err error) { - if !p.waitCall.Done() { - return -1, errors.New("process not exited") - } - if err := p.waitCall.Err(); err != nil { - return -1, err - } - return int(p.waitResp.ExitCode), nil -} - -// Kill sends a forceful terminate signal to the process and returns whether the -// signal was delivered. The process might not be terminated by the time this -// returns. -func (p *Process) Kill(ctx context.Context) (_ bool, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::Kill") - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - return p.Signal(ctx, nil) -} - -// Pid returns the process ID. -func (p *Process) Pid() int { - return int(p.id) -} - -// ResizeConsole requests that the pty associated with the process resize its -// window. -func (p *Process) ResizeConsole(ctx context.Context, width, height uint16) (err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::ResizeConsole", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - req := containerResizeConsole{ - requestBase: makeRequest(ctx, p.cid), - ProcessID: p.id, - Height: height, - Width: width, - } - var resp responseBase - return p.gc.brdg.RPC(ctx, rpcResizeConsole, &req, &resp, true) -} - -// Signal sends a signal to the process, returning whether it was delivered. -func (p *Process) Signal(ctx context.Context, options interface{}) (_ bool, err error) { - ctx, span := oc.StartSpan(ctx, "gcs::Process::Signal", oc.WithClientSpanKind) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - req := containerSignalProcess{ - requestBase: makeRequest(ctx, p.cid), - ProcessID: p.id, - Options: options, - } - var resp responseBase - // FUTURE: SIGKILL is idempotent and can safely be cancelled, but this interface - // does currently make it easy to determine what signal is being sent. 
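// A hedged usage sketch of the Process methods above: signal the process,
// then wait for the asynchronous WaitForProcess RPC to complete before
// reading the exit code (ExitCode returns an error until the wait is done).
package gcssketch

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/internal/gcs"
)

func stopAndReap(ctx context.Context, p *gcs.Process) (int, error) {
	delivered, err := p.Kill(ctx)
	if err != nil {
		return -1, err
	}
	if !delivered {
		// The process was already gone; waiting is still safe.
		log.Printf("kill not delivered to pid %d", p.Pid())
	}
	if err := p.Wait(); err != nil {
		return -1, err
	}
	return p.ExitCode()
}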
- err = p.gc.brdg.RPC(ctx, rpcSignalProcess, &req, &resp, false) - if err != nil { - if uint32(resp.Result) != hrNotFound { - return false, err - } - if !p.waitCall.Done() { - log.G(ctx).WithFields(logrus.Fields{ - logrus.ErrorKey: err, - logfields.ContainerID: p.cid, - logfields.ProcessID: p.id, - }).Warn("ignoring missing process") - } - return false, nil - } - return true, nil -} - -// Stdio returns the standard IO streams associated with the container. They -// will be closed when Close is called. -func (p *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { - return p.stdin, p.stdout, p.stderr -} - -// Wait waits for the process (or guest connection) to terminate. -func (p *Process) Wait() error { - p.waitCall.Wait() - return p.waitCall.Err() -} - -func (p *Process) waitBackground() { - ctx, span := oc.StartSpan(context.Background(), "gcs::Process::waitBackground") - defer span.End() - span.AddAttributes( - trace.StringAttribute("cid", p.cid), - trace.Int64Attribute("pid", int64(p.id))) - - p.waitCall.Wait() - ec, err := p.ExitCode() - if err != nil { - log.G(ctx).WithError(err).Error("failed wait") - } - log.G(ctx).WithField("exitCode", ec).Debug("process exited") - oc.SetSpanStatus(span, err) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/protocol.go b/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/protocol.go deleted file mode 100644 index 94e55e4c1e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/gcs/protocol.go +++ /dev/null @@ -1,371 +0,0 @@ -//go:build windows - -package gcs - -import ( - "encoding/json" - "fmt" - "strconv" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// LinuxGcsVsockPort is the vsock port number that the Linux GCS will -// connect to. -const LinuxGcsVsockPort = 0x40000000 - -// WindowsGcsHvsockServiceID is the hvsock service ID that the Windows GCS -// will connect to. 
-var WindowsGcsHvsockServiceID = guid.GUID{ - Data1: 0xacef5661, - Data2: 0x84a1, - Data3: 0x4e44, - Data4: [8]uint8{0x85, 0x6b, 0x62, 0x45, 0xe6, 0x9f, 0x46, 0x20}, -} - -// WindowsGcsHvHostID is the hvsock address for the parent of the VM running the GCS -var WindowsGcsHvHostID = guid.GUID{ - Data1: 0x894cc2d6, - Data2: 0x9d79, - Data3: 0x424f, - Data4: [8]uint8{0x93, 0xfe, 0x42, 0x96, 0x9a, 0xe6, 0xd8, 0xd1}, -} - -type anyInString struct { - Value interface{} -} - -func (a *anyInString) MarshalText() ([]byte, error) { - return json.Marshal(a.Value) -} - -func (a *anyInString) UnmarshalText(b []byte) error { - return json.Unmarshal(b, &a.Value) -} - -type rpcProc uint32 - -const ( - rpcCreate rpcProc = (iota+1)<<8 | 1 - rpcStart - rpcShutdownGraceful - rpcShutdownForced - rpcExecuteProcess - rpcWaitForProcess - rpcSignalProcess - rpcResizeConsole - rpcGetProperties - rpcModifySettings - rpcNegotiateProtocol - rpcDumpStacks - rpcDeleteContainerState - rpcUpdateContainer - rpcLifecycleNotification -) - -func (rpc rpcProc) String() string { - switch rpc { - case rpcCreate: - return "Create" - case rpcStart: - return "Start" - case rpcShutdownGraceful: - return "ShutdownGraceful" - case rpcShutdownForced: - return "ShutdownForced" - case rpcExecuteProcess: - return "ExecuteProcess" - case rpcWaitForProcess: - return "WaitForProcess" - case rpcSignalProcess: - return "SignalProcess" - case rpcResizeConsole: - return "ResizeConsole" - case rpcGetProperties: - return "GetProperties" - case rpcModifySettings: - return "ModifySettings" - case rpcNegotiateProtocol: - return "NegotiateProtocol" - case rpcDumpStacks: - return "DumpStacks" - case rpcDeleteContainerState: - return "DeleteContainerState" - case rpcUpdateContainer: - return "UpdateContainer" - case rpcLifecycleNotification: - return "LifecycleNotification" - default: - return "0x" + strconv.FormatUint(uint64(rpc), 16) - } -} - -type msgType uint32 - -const ( - msgTypeRequest msgType = 0x10100000 - msgTypeResponse msgType = 0x20100000 - msgTypeNotify msgType = 0x30100000 - msgTypeMask msgType = 0xfff00000 - - notifyContainer = 1<<8 | 1 -) - -func (typ msgType) String() string { - var s string - switch typ & msgTypeMask { - case msgTypeRequest: - s = "Request(" - case msgTypeResponse: - s = "Response(" - case msgTypeNotify: - s = "Notify(" - switch typ - msgTypeNotify { - case notifyContainer: - s += "Container" - default: - s += fmt.Sprintf("%#x", uint32(typ)) - } - return s + ")" - default: - return fmt.Sprintf("%#x", uint32(typ)) - } - s += rpcProc(typ &^ msgTypeMask).String() - return s + ")" -} - -// ocspancontext is the internal JSON representation of the OpenCensus -// `trace.SpanContext` for fowarding to a GCS that supports it. -type ocspancontext struct { - // TraceID is the `hex` encoded string of the OpenCensus - // `SpanContext.TraceID` to propagate to the guest. - TraceID string `json:",omitempty"` - // SpanID is the `hex` encoded string of the OpenCensus `SpanContext.SpanID` - // to propagate to the guest. - SpanID string `json:",omitempty"` - - // TraceOptions is the OpenCensus `SpanContext.TraceOptions` passed through - // to propagate to the guest. - TraceOptions uint32 `json:",omitempty"` - - // Tracestate is the `base64` encoded string of marshaling the OpenCensus - // `SpanContext.TraceState.Entries()` to JSON. - // - // If `SpanContext.Tracestate == nil || - // len(SpanContext.Tracestate.Entries()) == 0` this will be `""`. 
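// The rpcProc and msgType constants above pack two pieces of information into
// one uint32: the high bits (msgTypeMask) say whether a message is a request,
// response or notification, and the low bits identify the RPC. A small sketch
// of that encoding, using the literal values from the const blocks above:
package gcssketch

import "fmt"

const (
	sketchMsgTypeRequest = 0x10100000 // msgTypeRequest above
	sketchMsgTypeMask    = 0xfff00000 // msgTypeMask above
	sketchRPCCreate      = 0x101      // rpcCreate: (iota+1)<<8 | 1 with iota == 0
)

func showEncoding() {
	typ := uint32(sketchMsgTypeRequest | sketchRPCCreate) // a Create request: 0x10100101
	fmt.Printf("kind=%#x proc=%#x\n", typ&sketchMsgTypeMask, typ&^sketchMsgTypeMask)
	// Prints: kind=0x10100000 proc=0x101
}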
- Tracestate string `json:",omitempty"` -} - -type requestBase struct { - ContainerID string `json:"ContainerId"` - ActivityID guid.GUID `json:"ActivityId"` - - // OpenCensusSpanContext is the encoded OpenCensus `trace.SpanContext` if - // set when making the request. - // - // NOTE: This is not a part of the protocol but because its a JSON protocol - // adding fields is a non-breaking change. If the guest supports it this is - // just additive context. - OpenCensusSpanContext *ocspancontext `json:"ocsc,omitempty"` -} - -func (req *requestBase) Base() *requestBase { - return req -} - -type responseBase struct { - Result int32 // HResult - ErrorMessage string `json:",omitempty"` - ActivityID guid.GUID `json:"ActivityId,omitempty"` - ErrorRecords []errorRecord `json:",omitempty"` -} - -type errorRecord struct { - Result int32 // HResult - Message string - StackTrace string `json:",omitempty"` - ModuleName string - FileName string - Line uint32 - FunctionName string `json:",omitempty"` -} - -func (resp *responseBase) Base() *responseBase { - return resp -} - -type negotiateProtocolRequest struct { - requestBase - MinimumVersion uint32 - MaximumVersion uint32 -} - -type negotiateProtocolResponse struct { - responseBase - Version uint32 `json:",omitempty"` - Capabilities gcsCapabilities `json:",omitempty"` -} - -type dumpStacksRequest struct { - requestBase -} - -type dumpStacksResponse struct { - responseBase - GuestStacks string -} - -type deleteContainerStateRequest struct { - requestBase -} - -type containerCreate struct { - requestBase - ContainerConfig anyInString -} - -type uvmConfig struct { - SystemType string // must be "Container" - TimeZoneInformation *hcsschema.TimeZoneInformation -} - -type containerNotification struct { - requestBase - Type string // Compute.System.NotificationType - Operation string // Compute.System.ActiveOperation - Result int32 // HResult - ResultInfo anyInString `json:",omitempty"` -} - -type containerExecuteProcess struct { - requestBase - Settings executeProcessSettings -} - -type executeProcessSettings struct { - ProcessParameters anyInString - StdioRelaySettings *executeProcessStdioRelaySettings `json:",omitempty"` - VsockStdioRelaySettings *executeProcessVsockStdioRelaySettings `json:",omitempty"` -} - -type executeProcessStdioRelaySettings struct { - StdIn *guid.GUID `json:",omitempty"` - StdOut *guid.GUID `json:",omitempty"` - StdErr *guid.GUID `json:",omitempty"` -} - -type executeProcessVsockStdioRelaySettings struct { - StdIn uint32 `json:",omitempty"` - StdOut uint32 `json:",omitempty"` - StdErr uint32 `json:",omitempty"` -} - -type containerResizeConsole struct { - requestBase - ProcessID uint32 `json:"ProcessId"` - Height uint16 - Width uint16 -} - -type containerWaitForProcess struct { - requestBase - ProcessID uint32 `json:"ProcessId"` - TimeoutInMs uint32 -} - -type containerSignalProcess struct { - requestBase - ProcessID uint32 `json:"ProcessId"` - Options interface{} `json:",omitempty"` -} - -type containerPropertiesQuery schema1.PropertyQuery - -func (q *containerPropertiesQuery) MarshalText() ([]byte, error) { - return json.Marshal((*schema1.PropertyQuery)(q)) -} - -func (q *containerPropertiesQuery) UnmarshalText(b []byte) error { - return json.Unmarshal(b, (*schema1.PropertyQuery)(q)) -} - -type containerPropertiesQueryV2 hcsschema.PropertyQuery - -func (q *containerPropertiesQueryV2) MarshalText() ([]byte, error) { - return json.Marshal((*hcsschema.PropertyQuery)(q)) -} - -func (q *containerPropertiesQueryV2) UnmarshalText(b []byte) 
error { - return json.Unmarshal(b, (*hcsschema.PropertyQuery)(q)) -} - -type containerGetProperties struct { - requestBase - Query containerPropertiesQuery -} - -type containerGetPropertiesV2 struct { - requestBase - Query containerPropertiesQueryV2 -} - -type containerModifySettings struct { - requestBase - Request interface{} -} - -type gcsCapabilities struct { - SendHostCreateMessage bool - SendHostStartMessage bool - HvSocketConfigOnStartup bool - SendLifecycleNotifications bool - SupportedSchemaVersions []hcsschema.Version - RuntimeOsType string - GuestDefinedCapabilities interface{} -} - -type containerCreateResponse struct { - responseBase -} - -type containerExecuteProcessResponse struct { - responseBase - ProcessID uint32 `json:"ProcessId"` -} - -type containerWaitForProcessResponse struct { - responseBase - ExitCode uint32 -} - -type containerProperties schema1.ContainerProperties - -func (p *containerProperties) MarshalText() ([]byte, error) { - return json.Marshal((*schema1.ContainerProperties)(p)) -} - -func (p *containerProperties) UnmarshalText(b []byte) error { - return json.Unmarshal(b, (*schema1.ContainerProperties)(p)) -} - -type containerPropertiesV2 hcsschema.Properties - -func (p *containerPropertiesV2) MarshalText() ([]byte, error) { - return json.Marshal((*hcsschema.Properties)(p)) -} - -func (p *containerPropertiesV2) UnmarshalText(b []byte) error { - return json.Unmarshal(b, (*hcsschema.Properties)(p)) -} - -type containerGetPropertiesResponse struct { - responseBase - Properties containerProperties -} - -type containerGetPropertiesResponseV2 struct { - responseBase - Properties containerPropertiesV2 -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/doc.go deleted file mode 100644 index abc59d664f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package spec encapsulates a number of GCS specific oci spec modifications, e.g., -// networking mounts, sandbox path substitutions in guest etc. -package spec diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/spec.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/spec.go deleted file mode 100644 index 3057a1a351..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guest/spec/spec.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build linux -// +build linux - -// TODO: consider moving oci spec specific code from /internal/guest/runtime/hcsv2/spec.go - -package spec - -import ( - "path/filepath" - "strings" - - "github.com/Microsoft/hcsshim/internal/guestpath" - oci "github.com/opencontainers/runtime-spec/specs-go" -) - -// networkingMountPaths returns an array of mount paths to enable networking -// inside containers. -func networkingMountPaths() []string { - return []string{ - "/etc/hostname", - "/etc/hosts", - "/etc/resolv.conf", - } -} - -// GenerateWorkloadContainerNetworkMounts generates an array of specs.Mount -// required for container networking. Original spec is left untouched and -// it's the responsibility of a caller to update it. 
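// A hedged usage sketch for the helper described above: generate the
// networking bind mounts for a workload container and append them to the
// spec, which the helper itself deliberately leaves untouched. The sources
// for /etc/hostname, /etc/hosts and /etc/resolv.conf live under the sandbox
// root directory inside the UVM.
package specsketch

import (
	spec "github.com/Microsoft/hcsshim/internal/guest/spec"
	oci "github.com/opencontainers/runtime-spec/specs-go"
)

func addNetworkMounts(sandboxID string, s *oci.Spec) {
	s.Mounts = append(s.Mounts, spec.GenerateWorkloadContainerNetworkMounts(sandboxID, s)...)
}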
-func GenerateWorkloadContainerNetworkMounts(sandboxID string, spec *oci.Spec) []oci.Mount { - var nMounts []oci.Mount - - for _, mountPath := range networkingMountPaths() { - // Don't override if the mount is present in the spec - if MountPresent(mountPath, spec.Mounts) { - continue - } - options := []string{"bind"} - if spec.Root != nil && spec.Root.Readonly { - options = append(options, "ro") - } - trimmedMountPath := strings.TrimPrefix(mountPath, "/etc/") - mt := oci.Mount{ - Destination: mountPath, - Type: "bind", - Source: filepath.Join(SandboxRootDir(sandboxID), trimmedMountPath), - Options: options, - } - nMounts = append(nMounts, mt) - } - return nMounts -} - -// MountPresent checks if mountPath is present in the specMounts array. -func MountPresent(mountPath string, specMounts []oci.Mount) bool { - for _, m := range specMounts { - if m.Destination == mountPath { - return true - } - } - return false -} - -// SandboxRootDir returns the sandbox container root directory inside UVM/host. -func SandboxRootDir(sandboxID string) string { - return filepath.Join(guestpath.LCOWRootPrefixInUVM, sandboxID) -} - -// SandboxMountsDir returns sandbox mounts directory inside UVM/host. -func SandboxMountsDir(sandboxID string) string { - return filepath.Join(SandboxRootDir(sandboxID), "sandboxMounts") -} - -// HugePagesMountsDir returns hugepages mounts directory inside UVM. -func HugePagesMountsDir(sandboxID string) string { - return filepath.Join(SandboxRootDir(sandboxID), "hugepages") -} - -// SandboxMountSource returns sandbox mount path inside UVM -func SandboxMountSource(sandboxID, path string) string { - mountsDir := SandboxMountsDir(sandboxID) - subPath := strings.TrimPrefix(path, guestpath.SandboxMountPrefix) - return filepath.Join(mountsDir, subPath) -} - -// HugePagesMountSource returns hugepages mount path inside UVM -func HugePagesMountSource(sandboxID, path string) string { - mountsDir := HugePagesMountsDir(sandboxID) - subPath := strings.TrimPrefix(path, guestpath.HugePagesMountPrefix) - return filepath.Join(mountsDir, subPath) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guestpath/paths.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guestpath/paths.go deleted file mode 100644 index be812ba075..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guestpath/paths.go +++ /dev/null @@ -1,30 +0,0 @@ -package guestpath - -const ( - // LCOWNvidiaMountPath is the path format in LCOW UVM where nvidia tools - // are mounted keep this value in sync with opengcs - LCOWNvidiaMountPath = "/run/nvidia" - // LCOWRootPrefixInUVM is the path inside UVM where LCOW container's root - // file system will be mounted - LCOWRootPrefixInUVM = "/run/gcs/c" - // WCOWRootPrefixInUVM is the path inside UVM where WCOW container's root - // file system will be mounted - WCOWRootPrefixInUVM = `C:\c` - // SandboxMountPrefix is mount prefix used in container spec to mark a - // sandbox-mount - SandboxMountPrefix = "sandbox://" - // HugePagesMountPrefix is mount prefix used in container spec to mark a - // huge-pages mount - HugePagesMountPrefix = "hugepages://" - // LCOWMountPathPrefixFmt is the path format in the LCOW UVM where - // non-global mounts, such as Plan9 mounts are added - LCOWMountPathPrefixFmt = "/mounts/m%d" - // LCOWGlobalMountPrefixFmt is the path format in the LCOW UVM where global - // mounts are added - LCOWGlobalMountPrefixFmt = "/run/mounts/m%d" - // WCOWGlobalMountPrefixFmt is the path prefix format in the WCOW UVM where - // mounts are added - 
WCOWGlobalMountPrefixFmt = "C:\\mounts\\m%d" - // RootfsPath is part of the container's rootfs path - RootfsPath = "rootfs" -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go deleted file mode 100644 index 7b27173c3a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ /dev/null @@ -1,163 +0,0 @@ -//go:build windows - -package hcs - -import ( - "fmt" - "sync" - "syscall" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "github.com/sirupsen/logrus" -) - -var ( - nextCallback uintptr - callbackMap = map[uintptr]*notificationWatcherContext{} - callbackMapLock = sync.RWMutex{} - - notificationWatcherCallback = syscall.NewCallback(notificationWatcher) - - // Notifications for HCS_SYSTEM handles - hcsNotificationSystemExited hcsNotification = 0x00000001 - hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 - hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 - hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 - hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 - hcsNotificationSystemCrashReport hcsNotification = 0x00000006 - hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 - hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 - hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 - hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A - hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B - hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C - hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D - hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E - - // Notifications for HCS_PROCESS handles - hcsNotificationProcessExited hcsNotification = 0x00010000 - - // Common notifications - hcsNotificationInvalid hcsNotification = 0x00000000 - hcsNotificationServiceDisconnect hcsNotification = 0x01000000 -) - -type hcsNotification uint32 - -func (hn hcsNotification) String() string { - switch hn { - case hcsNotificationSystemExited: - return "SystemExited" - case hcsNotificationSystemCreateCompleted: - return "SystemCreateCompleted" - case hcsNotificationSystemStartCompleted: - return "SystemStartCompleted" - case hcsNotificationSystemPauseCompleted: - return "SystemPauseCompleted" - case hcsNotificationSystemResumeCompleted: - return "SystemResumeCompleted" - case hcsNotificationSystemCrashReport: - return "SystemCrashReport" - case hcsNotificationSystemSiloJobCreated: - return "SystemSiloJobCreated" - case hcsNotificationSystemSaveCompleted: - return "SystemSaveCompleted" - case hcsNotificationSystemRdpEnhancedModeStateChanged: - return "SystemRdpEnhancedModeStateChanged" - case hcsNotificationSystemShutdownFailed: - return "SystemShutdownFailed" - case hcsNotificationSystemGetPropertiesCompleted: - return "SystemGetPropertiesCompleted" - case hcsNotificationSystemModifyCompleted: - return "SystemModifyCompleted" - case hcsNotificationSystemCrashInitiated: - return "SystemCrashInitiated" - case hcsNotificationSystemGuestConnectionClosed: - return "SystemGuestConnectionClosed" - case hcsNotificationProcessExited: - return "ProcessExited" - case hcsNotificationInvalid: - return "Invalid" - case hcsNotificationServiceDisconnect: - return "ServiceDisconnect" - default: - return 
fmt.Sprintf("Unknown: %d", hn) - } -} - -type notificationChannel chan error - -type notificationWatcherContext struct { - channels notificationChannels - handle vmcompute.HcsCallback - - systemID string - processID int -} - -type notificationChannels map[hcsNotification]notificationChannel - -func newSystemChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - hcsNotificationSystemExited, - hcsNotificationSystemCreateCompleted, - hcsNotificationSystemStartCompleted, - hcsNotificationSystemPauseCompleted, - hcsNotificationSystemResumeCompleted, - hcsNotificationSystemSaveCompleted, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func newProcessChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - hcsNotificationProcessExited, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func closeChannels(channels notificationChannels) { - for _, c := range channels { - close(c) - } -} - -func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { - var result error - if int32(notificationStatus) < 0 { - result = interop.Win32FromHresult(notificationStatus) - } - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return 0 - } - - log := logrus.WithFields(logrus.Fields{ - "notification-type": notificationType.String(), - "system-id": context.systemID, - }) - if context.processID != 0 { - log.Data[logfields.ProcessID] = context.processID - } - log.Debug("HCS notification") - - if channel, ok := context.channels[notificationType]; ok { - channel <- result - } - - return 0 -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go deleted file mode 100644 index d792dda986..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go +++ /dev/null @@ -1 +0,0 @@ -package hcs diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go deleted file mode 100644 index 226dad2fbc..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ /dev/null @@ -1,348 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net" - "syscall" - - "github.com/Microsoft/hcsshim/internal/log" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists - ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = syscall.Errno(0x490) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = syscall.Errno(0x32) - - // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = syscall.Errno(0xd) - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") - - // 
ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = errors.New("hcsshim: timeout waiting for notification") - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = errors.New("unexpected container exit") - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") - - // ErrUnexpectedValue is an error encountered when hcs returns an invalid value - ErrUnexpectedValue = errors.New("unexpected value returned from hcs") - - // ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied - ErrOperationDenied = errors.New("operation denied") - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) - - // ErrProcNotFound is an error encountered when a procedure look up fails. - ErrProcNotFound = syscall.Errno(0x7f) - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. - ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) - - // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly - ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = errors.New("unsupported platform request") - - // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. - ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) - - // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that - // compute system has already been closed. 
- ErrInvalidHandle = syscall.Errno(0x6) -) - -type ErrorEvent struct { - Message string `json:"Message,omitempty"` // Fully formated error message - StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form - Provider string `json:"Provider,omitempty"` - EventID uint16 `json:"EventId,omitempty"` - Flags uint32 `json:"Flags,omitempty"` - Source string `json:"Source,omitempty"` - //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function) -} - -type hcsResult struct { - Error int32 - ErrorMessage string - ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` -} - -func (ev *ErrorEvent) String() string { - evs := "[Event Detail: " + ev.Message - if ev.StackTrace != "" { - evs += " Stack Trace: " + ev.StackTrace - } - if ev.Provider != "" { - evs += " Provider: " + ev.Provider - } - if ev.EventID != 0 { - evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) - } - if ev.Flags != 0 { - evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) - } - if ev.Source != "" { - evs += " Source: " + ev.Source - } - evs += "]" - return evs -} - -func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { - if resultJSON != "" { - result := &hcsResult{} - if err := json.Unmarshal([]byte(resultJSON), result); err != nil { - log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") - return nil - } - return result.ErrorEvents - } - return nil -} - -type HcsError struct { - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &HcsError{} - -func (e *HcsError) Error() string { - s := e.Op + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *HcsError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *HcsError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - SystemID string - Pid int - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &ProcessError{} - -// SystemError is an error encountered in HCS during an operation on a Container object -type SystemError struct { - ID string - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &SystemError{} - -func (e *SystemError) Error() string { - s := e.Op + " " + e.ID + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *SystemError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *SystemError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*SystemError); ok { - return err - } - return &SystemError{ - ID: system.ID(), - Op: op, - Err: err, - Events: events, - } -} - -func (e *ProcessError) Error() string { - s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *ProcessError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() -} - -func (e *ProcessError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeProcessError(process *Process, op string, err error, 
events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { - return err - } - return &ProcessError{ - Pid: process.Pid(), - SystemID: process.SystemID(), - Op: op, - Err: err, - Events: events, - } -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsNotExist(err error) bool { - err = getInnerError(err) - return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound -} - -// IsErrorInvalidHandle checks whether the error is the result of an operation carried -// out on a handle that is invalid/closed. This error popped up while trying to query -// stats on a container in the process of being stopped. -func IsErrorInvalidHandle(err error) bool { - err = getInnerError(err) - return err == ErrInvalidHandle -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. -func IsAlreadyClosed(err error) bool { - err = getInnerError(err) - return err == ErrAlreadyClosed -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. -func IsPending(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationPending -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. -func IsTimeout(err error) bool { - if err, ok := err.(net.Error); ok && err.Timeout() { - return true - } - err = getInnerError(err) - return err == ErrTimeout -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsAlreadyStopped(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeAlreadyStopped || - err == ErrProcessAlreadyStopped || - err == ErrElementNotFound -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - err = getInnerError(err) - // If Platform doesn't recognize or support the request sent, below errors are seen - return err == ErrVmcomputeInvalidJSON || - err == ErrInvalidData || - err == ErrNotSupported || - err == ErrVmcomputeUnknownMessage -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationInvalidState -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. 
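The `Is*` classifiers above (backed by `getInnerError`) are how callers are meant to react to HCS failures instead of comparing raw `syscall.Errno` values. A minimal, hypothetical sketch of that pattern, assuming test-style code that lives inside the hcsshim module (the `hcstest` package name, `stopSystem` helper, and the tolerance policy are invented for illustration):

```go
//go:build windows

package hcstest

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// stopSystem requests termination of a compute system and treats the
// "already stopped" / "does not exist" error families as success, using the
// classification helpers defined in errors.go above.
func stopSystem(ctx context.Context, system *hcs.System) error {
	err := system.Terminate(ctx)
	switch {
	case err == nil:
		return nil
	case hcs.IsAlreadyStopped(err), hcs.IsNotExist(err):
		return nil // nothing left to stop
	case hcs.IsPending(err):
		return system.Wait() // termination is completing asynchronously
	default:
		return fmt.Errorf("terminating compute system: %w", err)
	}
}
```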
-func IsAccessIsDenied(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationAccessIsDenied -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *HcsError: - err = pe.Err - case *SystemError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go deleted file mode 100644 index c056636e8b..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ /dev/null @@ -1,558 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "encoding/json" - "errors" - "io" - "os" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "go.opencensus.io/trace" -) - -// ContainerError is an error encountered in HCS -type Process struct { - handleLock sync.RWMutex - handle vmcompute.HcsProcess - processID int - system *System - hasCachedStdio bool - stdioLock sync.Mutex - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - callbackNumber uintptr - killSignalDelivered bool - - closedWaitOnce sync.Once - waitBlock chan struct{} - exitCode int - waitError error -} - -func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { - return &Process{ - handle: process, - processID: processID, - system: computeSystem, - waitBlock: make(chan struct{}), - } -} - -type processModifyRequest struct { - Operation string - ConsoleSize *consoleSize `json:",omitempty"` - CloseHandle *closeHandle `json:",omitempty"` -} - -type consoleSize struct { - Height uint16 - Width uint16 -} - -type closeHandle struct { - Handle string -} - -type processStatus struct { - ProcessID uint32 - Exited bool - ExitCode uint32 - LastWaitResult int32 -} - -const stdIn string = "StdIn" - -const ( - modifyConsoleSize string = "ConsoleSize" - modifyCloseHandle string = "CloseHandle" -) - -// Pid returns the process ID of the process within the container. -func (process *Process) Pid() int { - return process.processID -} - -// SystemID returns the ID of the process's compute system. -func (process *Process) SystemID() string { - return process.system.ID() -} - -func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { - case nil: - return true, nil - case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: - select { - case <-process.waitBlock: - // The process exit notification has already arrived. - default: - // The process should be gone, but we have not received the notification. - // After a second, force unblock the process wait to work around a possible - // deadlock in the HCS. - go func() { - time.Sleep(time.Second) - process.closedWaitOnce.Do(func() { - log.G(ctx).WithError(err).Warn("force unblocking process waits") - process.exitCode = -1 - process.waitError = err - close(process.waitBlock) - }) - }() - } - return false, nil - default: - return false, err - } -} - -// Signal signals the process with `options`. -// -// For LCOW `guestresource.SignalProcessOptionsLCOW`. -// -// For WCOW `guestresource.SignalProcessOptionsWCOW`. 
-func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Signal" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - optionsb, err := json.Marshal(options) - if err != nil { - return false, err - } - - resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) - events := processHcsResult(ctx, resultJSON) - delivered, err := process.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(process, operation, err, events) - } - return delivered, err -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *Process) Kill(ctx context.Context) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Kill" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - if process.killSignalDelivered { - // A kill signal has already been sent to this process. Sending a second - // one offers no real benefit, as processes cannot stop themselves from - // being terminated, once a TerminateProcess has been issued. Sending a - // second kill may result in a number of errors (two of which detailed bellow) - // and which we can avoid handling. - return true, nil - } - - resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) - if err != nil { - // We still need to check these two cases, as processes may still be killed by an - // external actor (human operator, OOM, random script etc). - if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { - // There are two cases where it should be safe to ignore an error returned - // by HcsTerminateProcess. The first one is cause by the fact that - // HcsTerminateProcess ends up calling TerminateProcess in the context - // of a container. According to the TerminateProcess documentation: - // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks - // After a process has terminated, call to TerminateProcess with open - // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. - // It's safe to ignore this error here. HCS should always have permissions - // to kill processes inside any container. So an ERROR_ACCESS_DENIED - // is unlikely to be anything else than what the ending remarks in the - // documentation states. - // - // The second case is generated by hcs itself, if for any reason HcsTerminateProcess - // is called twice in a very short amount of time. In such cases, hcs may return - // HCS_E_PROCESS_ALREADY_STOPPED. - return true, nil - } - } - events := processHcsResult(ctx, resultJSON) - delivered, err := process.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(process, operation, err, events) - } - - process.killSignalDelivered = delivered - return delivered, err -} - -// waitBackground waits for the process exit notification. Once received sets -// `process.waitError` (if any) and unblocks all `Wait` calls. -// -// This MUST be called exactly once per `process.handle` but `Wait` is safe to -// call multiple times. 
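`Kill` only delivers the terminate request; it never waits. Callers typically pair it with `Wait` and a deadline. A hedged sketch (the `killAndWait` helper and the timeout policy are invented; the package/import layout mirrors the earlier sketch):

```go
//go:build windows

package hcstest

import (
	"context"
	"time"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// killAndWait asks HCS to terminate an in-container process, then waits up to
// `limit` for the exit notification before giving up with ErrTimeout.
func killAndWait(ctx context.Context, p *hcs.Process, limit time.Duration) (int, error) {
	if _, err := p.Kill(ctx); err != nil && !hcs.IsAlreadyStopped(err) {
		return -1, err
	}

	done := make(chan error, 1)
	go func() { done <- p.Wait() }()

	select {
	case err := <-done:
		if err != nil {
			return -1, err
		}
		return p.ExitCode()
	case <-time.After(limit):
		return -1, hcs.ErrTimeout
	}
}
```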
-func (process *Process) waitBackground() { - operation := "hcs::Process::waitBackground" - ctx, span := oc.StartSpan(context.Background(), operation) - defer span.End() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - var ( - err error - exitCode = -1 - propertiesJSON string - resultJSON string - ) - - err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) - if err != nil { - err = makeProcessError(process, operation, err, nil) - log.G(ctx).WithError(err).Error("failed wait") - } else { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - // Make sure we didnt race with Close() here - if process.handle != 0 { - propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - if err != nil { - err = makeProcessError(process, operation, err, events) //nolint:ineffassign - } else { - properties := &processStatus{} - err = json.Unmarshal([]byte(propertiesJSON), properties) - if err != nil { - err = makeProcessError(process, operation, err, nil) //nolint:ineffassign - } else { - if properties.LastWaitResult != 0 { - log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") - } else { - exitCode = int(properties.ExitCode) - } - } - } - } - } - log.G(ctx).WithField("exitCode", exitCode).Debug("process exited") - - process.closedWaitOnce.Do(func() { - process.exitCode = exitCode - process.waitError = err - close(process.waitBlock) - }) - oc.SetSpanStatus(span, err) -} - -// Wait waits for the process to exit. If the process has already exited returns -// the previous error (if any). -func (process *Process) Wait() error { - <-process.waitBlock - return process.waitError -} - -// ResizeConsole resizes the console of the process. -func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::ResizeConsole" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyConsoleSize, - ConsoleSize: &consoleSize{ - Height: height, - Width: width, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - return nil -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *Process) ExitCode() (int, error) { - select { - case <-process.waitBlock: - if process.waitError != nil { - return -1, process.waitError - } - return process.exitCode, nil - default: - return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) - } -} - -// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes. Once returned, these pipes -// are the responsibility of the caller to close. 
-func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { - operation := "hcs::Process::StdioLegacy" - ctx, span := oc.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - if process.handle == 0 { - return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.hasCachedStdio { - stdin, stdout, stderr := process.stdin, process.stdout, process.stderr - process.stdin, process.stdout, process.stderr = nil, nil, nil - process.hasCachedStdio = false - return stdin, stdout, stderr, nil - } - - processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, events) - } - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, nil) - } - - return pipes[0], pipes[1], pipes[2], nil -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. -// To close them, close the process handle. -func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - return process.stdin, process.stdout, process.stderr -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. 
-func (process *Process) CloseStdin(ctx context.Context) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::CloseStdin" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyCloseHandle, - CloseHandle: &closeHandle{ - Handle: stdIn, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - process.stdioLock.Unlock() - - return nil -} - -func (process *Process) CloseStdout(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - return nil -} - -func (process *Process) CloseStderr(ctx context.Context) (err error) { - ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - } - return nil -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. 
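`StdioLegacy` hands pipe ownership to the caller exactly once, and `CloseStdin` is how EOF is signalled to the in-container process. A hedged sketch of wiring the three pipes together (the `pumpStdio` helper and its buffering choices are invented, and it assumes the process was created with stdin/stdout/stderr pipes enabled):

```go
//go:build windows

package hcstest

import (
	"bytes"
	"context"
	"io"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// pumpStdio feeds `input` to the process and captures its stdout/stderr.
// StdioLegacy transfers pipe ownership to us, so we close what we receive.
func pumpStdio(ctx context.Context, p *hcs.Process, input string) (string, string, error) {
	stdin, stdout, stderr, err := p.StdioLegacy()
	if err != nil {
		return "", "", err
	}
	defer stdout.Close()
	defer stderr.Close()

	if _, err := io.WriteString(stdin, input); err != nil {
		stdin.Close()
		return "", "", err
	}
	stdin.Close()
	_ = p.CloseStdin(ctx) // tell the process there is no more input

	// Drain stderr concurrently so neither pipe can fill up and stall the process.
	var outBuf, errBuf bytes.Buffer
	copyErr := make(chan error, 1)
	go func() {
		_, cerr := io.Copy(&errBuf, stderr)
		copyErr <- cerr
	}()
	if _, err := io.Copy(&outBuf, stdout); err != nil {
		return "", "", err
	}
	if err := <-copyErr; err != nil {
		return "", "", err
	}
	return outBuf.String(), errBuf.String(), nil
}
```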
-func (process *Process) Close() (err error) { - operation := "hcs::Process::Close" - ctx, span := oc.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - // Don't double free this - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - } - process.stdioLock.Unlock() - - if err = process.unregisterCallback(ctx); err != nil { - return makeProcessError(process, operation, err, nil) - } - - if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { - return makeProcessError(process, operation, err, nil) - } - - process.handle = 0 - process.closedWaitOnce.Do(func() { - process.exitCode = -1 - process.waitError = ErrAlreadyClosed - close(process.waitBlock) - }) - - return nil -} - -func (process *Process) registerCallback(ctx context.Context) error { - callbackContext := ¬ificationWatcherContext{ - channels: newProcessChannels(), - systemID: process.SystemID(), - processID: process.processID, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - return err - } - callbackContext.handle = callbackHandle - process.callbackNumber = callbackNumber - - return nil -} - -func (process *Process) unregisterCallback(ctx context.Context) error { - callbackNumber := process.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // vmcompute.HcsUnregisterProcessCallback has its own synchronization to - // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
- err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go deleted file mode 100644 index d1f219cfad..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build windows - -package schema1 - -import ( - "encoding/json" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig struct { - ApplicationName string `json:",omitempty"` - CommandLine string `json:",omitempty"` - CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows - User string `json:",omitempty"` - WorkingDirectory string `json:",omitempty"` - Environment map[string]string `json:",omitempty"` - EmulateConsole bool `json:",omitempty"` - CreateStdInPipe bool `json:",omitempty"` - CreateStdOutPipe bool `json:",omitempty"` - CreateStdErrPipe bool `json:",omitempty"` - ConsoleSize [2]uint `json:",omitempty"` - CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows - OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows -} - -type Layer struct { - ID string - Path string -} - -type MappedDir struct { - HostPath string - ContainerPath string - ReadOnly bool - BandwidthMaximum uint64 - IOPSMaximum uint64 - CreateInUtilityVM bool - // LinuxMetadata - Support added in 1803/RS4+. - LinuxMetadata bool `json:",omitempty"` -} - -type MappedPipe struct { - HostPath string - ContainerPipeName string -} - -type HvRuntime struct { - ImagePath string `json:",omitempty"` - SkipTemplate bool `json:",omitempty"` - LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM - LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM - LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode - BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD - WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD -} - -type MappedVirtualDisk struct { - HostPath string `json:",omitempty"` // Path to VHD on the host - ContainerPath string // Platform-specific mount point path in the container - CreateInUtilityVM bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" - AttachOnly bool `json:",omitempty"` -} - -// AssignedDevice represents a device that has been directly assigned to a container -// -// NOTE: Support added in RS5 -type AssignedDevice struct { - // InterfaceClassGUID of the device to assign to container. 
- InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` -} - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig struct { - SystemType string // HCS requires this to be hard-coded to "Container" - Name string // Name of the container. We use the docker ID. - Owner string `json:",omitempty"` // The management platform that created this container - VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} - IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows - LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID - Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID - Credentials string `json:",omitempty"` // Credentials information - ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. - ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. - ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. - StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS - StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second - StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller - MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes - HostName string `json:",omitempty"` // Hostname - MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) - MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes - HvPartition bool // True if it a Hyper-V Container - NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. - EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container - HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM - Servicing bool `json:",omitempty"` // True if this container is for servicing - AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution - DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution - ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. - TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed - MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start - AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. 
NOTE: Support added in RS5 -} - -type ComputeSystemQuery struct { - IDs []string `json:"Ids,omitempty"` - Types []string `json:",omitempty"` - Names []string `json:",omitempty"` - Owners []string `json:",omitempty"` -} - -type PropertyType string - -const ( - PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 - PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 - PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call - PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 -) - -type PropertyQuery struct { - PropertyTypes []PropertyType `json:",omitempty"` -} - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties struct { - ID string `json:"Id"` - State string - Name string - SystemType string - RuntimeOSType string `json:"RuntimeOsType,omitempty"` - Owner string - SiloGUID string `json:"SiloGuid,omitempty"` - RuntimeID guid.GUID `json:"RuntimeId,omitempty"` - IsRuntimeTemplate bool `json:",omitempty"` - RuntimeImagePath string `json:",omitempty"` - Stopped bool `json:",omitempty"` - ExitType string `json:",omitempty"` - AreUpdatesPending bool `json:",omitempty"` - ObRoot string `json:",omitempty"` - Statistics Statistics `json:",omitempty"` - ProcessList []ProcessListItem `json:",omitempty"` - MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` - GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` -} - -// MemoryStats holds the memory statistics for a container -type MemoryStats struct { - UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` -} - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats struct { - TotalRuntime100ns uint64 `json:",omitempty"` - RuntimeUser100ns uint64 `json:",omitempty"` - RuntimeKernel100ns uint64 `json:",omitempty"` -} - -// StorageStats holds the storage statistics for a container -type StorageStats struct { - ReadCountNormalized uint64 `json:",omitempty"` - ReadSizeBytes uint64 `json:",omitempty"` - WriteCountNormalized uint64 `json:",omitempty"` - WriteSizeBytes uint64 `json:",omitempty"` -} - -// NetworkStats holds the network statistics for a container -type NetworkStats struct { - BytesReceived uint64 `json:",omitempty"` - BytesSent uint64 `json:",omitempty"` - PacketsReceived uint64 `json:",omitempty"` - PacketsSent uint64 `json:",omitempty"` - DroppedPacketsIncoming uint64 `json:",omitempty"` - DroppedPacketsOutgoing uint64 `json:",omitempty"` - EndpointId string `json:",omitempty"` - InstanceId string `json:",omitempty"` -} - -// Statistics is the structure returned by a statistics call on a container -type Statistics struct { - Timestamp time.Time `json:",omitempty"` - ContainerStartTime time.Time `json:",omitempty"` - Uptime100ns uint64 `json:",omitempty"` - Memory MemoryStats `json:",omitempty"` - Processor ProcessorStats `json:",omitempty"` - Storage StorageStats `json:",omitempty"` - Network []NetworkStats `json:",omitempty"` -} - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem struct { - CreateTimestamp time.Time `json:",omitempty"` - ImageName string `json:",omitempty"` - KernelTime100ns uint64 `json:",omitempty"` - MemoryCommitBytes 
uint64 `json:",omitempty"` - MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` - MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` - ProcessId uint32 `json:",omitempty"` - UserTime100ns uint64 `json:",omitempty"` -} - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController struct { - MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` -} - -// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM -type GuestDefinedCapabilities struct { - NamespaceAddRequestSupported bool `json:",omitempty"` - SignalProcessSupported bool `json:",omitempty"` - DumpStacksSupported bool `json:",omitempty"` - DeleteContainerStateSupported bool `json:",omitempty"` - UpdateContainerSupported bool `json:",omitempty"` -} - -// GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM -type GuestConnectionInfo struct { - SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` - ProtocolVersion uint32 `json:",omitempty"` - GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` -} - -// Type of Request Support in ModifySystem -type RequestType string - -// Type of Resource Support in ModifySystem -type ResourceType string - -// RequestType const -const ( - Add RequestType = "Add" - Remove RequestType = "Remove" - Network ResourceType = "Network" -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse struct { - Resource ResourceType `json:"ResourceType"` - Data interface{} `json:"Settings"` - Request RequestType `json:"RequestType,omitempty"` -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go deleted file mode 100644 index 6364da8e23..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - -type ModifySettingRequest struct { - ResourcePath string `json:"ResourcePath,omitempty"` - - RequestType guestrequest.RequestType `json:"RequestType,omitempty"` // NOTE: Swagger generated as string. Locally updated. - - Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated - - GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go deleted file mode 100644 index a46b0051df..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "encoding/json" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/vmcompute" -) - -// GetServiceProperties returns properties of the host compute service. -func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { - operation := "hcs::GetServiceProperties" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &hcsschema.ServiceProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, err - } - return properties, nil -} - -// ModifyServiceSettings modifies settings of the host compute service. -func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { - operation := "hcs::ModifyServiceSettings" - - settingsJSON, err := json.Marshal(settings) - if err != nil { - return err - } - resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return &HcsError{Op: operation, Err: err, Events: events} - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go deleted file mode 100644 index 2f6e4e89e4..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ /dev/null @@ -1,814 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/jobobject" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -type System struct { - handleLock sync.RWMutex - handle vmcompute.HcsSystem - id string - callbackNumber uintptr - - closedWaitOnce sync.Once - waitBlock chan struct{} - waitError error - exitError error - os, typ, owner string - startTime time.Time -} - -func newSystem(id string) *System { - return &System{ - id: id, - waitBlock: make(chan struct{}), - } -} - -// Implementation detail for silo naming, this should NOT be relied upon very heavily. -func siloNameFmt(containerID string) string { - return fmt.Sprintf(`\Container_%s`, containerID) -} - -// CreateComputeSystem creates a new compute system with the given configuration but does not start it. 
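`GetServiceProperties` in service.go above is a thin JSON round trip through vmcompute. A hedged usage sketch; an empty `PropertyQuery` is assumed to be acceptable here, and since the `ServiceProperties` fields are not shown in this patch the result is simply dumped as JSON:

```go
//go:build windows

package hcstest

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// printServiceProperties queries the Host Compute Service and prints whatever
// properties it returns.
func printServiceProperties(ctx context.Context) error {
	props, err := hcs.GetServiceProperties(ctx, hcsschema.PropertyQuery{})
	if err != nil {
		return err
	}
	b, err := json.MarshalIndent(props, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	return nil
}
```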
-func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { - operation := "hcs::CreateComputeSystem" - - // hcsCreateComputeSystemContext is an async operation. Start the outer span - // here to measure the full create time. - ctx, span := oc.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", id)) - - computeSystem := newSystem(id) - - hcsDocumentB, err := json.Marshal(hcsDocumentInterface) - if err != nil { - return nil, err - } - - hcsDocument := string(hcsDocumentB) - - var ( - identity syscall.Handle - resultJSON string - createError error - ) - computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) - if createError == nil || IsPending(createError) { - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. - _ = computeSystem.Terminate(ctx) - return nil, makeSystemError(computeSystem, operation, err, nil) - } - } - - events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) - if err != nil { - if err == ErrTimeout { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. - _ = computeSystem.Terminate(ctx) - } - return nil, makeSystemError(computeSystem, operation, err, events) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -// OpenComputeSystem opens an existing compute system by ID. -func OpenComputeSystem(ctx context.Context, id string) (*System, error) { - operation := "hcs::OpenComputeSystem" - - computeSystem := newSystem(id) - handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - computeSystem.handle = handle - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -func (computeSystem *System) getCachedProperties(ctx context.Context) error { - props, err := computeSystem.Properties(ctx) - if err != nil { - return err - } - computeSystem.typ = strings.ToLower(props.SystemType) - computeSystem.os = strings.ToLower(props.RuntimeOSType) - computeSystem.owner = strings.ToLower(props.Owner) - if computeSystem.os == "" && computeSystem.typ == "container" { - // Pre-RS5 HCS did not return the OS, but it only supported containers - // that ran Windows. - computeSystem.os = "windows" - } - return nil -} - -// OS returns the operating system of the compute system, "linux" or "windows". -func (computeSystem *System) OS() string { - return computeSystem.os -} - -// IsOCI returns whether processes in the compute system should be created via -// OCI. 
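`OpenComputeSystem` attaches to an existing compute system and caches its type, OS, and owner, which is what `OS()` and `IsOCI()` report afterwards. A small illustrative sketch (the `inspectSystem` helper and the logging are invented):

```go
//go:build windows

package hcstest

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// inspectSystem opens an existing compute system by ID and reports its cached
// properties. Close only releases the handle; it neither terminates nor waits.
func inspectSystem(ctx context.Context, id string) error {
	system, err := hcs.OpenComputeSystem(ctx, id)
	if err != nil {
		return err
	}
	defer system.Close()

	fmt.Printf("id=%s os=%s oci=%t\n", system.ID(), system.OS(), system.IsOCI())
	return nil
}
```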
-func (computeSystem *System) IsOCI() bool { - return computeSystem.os == "linux" && computeSystem.typ == "container" -} - -// GetComputeSystems gets a list of the compute systems on the system that match the query -func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { - operation := "hcs::GetComputeSystems" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - - computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if computeSystemsJSON == "" { - return nil, ErrUnexpectedValue - } - computeSystems := []schema1.ContainerProperties{} - if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { - return nil, err - } - - return computeSystems, nil -} - -// Start synchronously starts the computeSystem. -func (computeSystem *System) Start(ctx context.Context) (err error) { - operation := "hcs::System::Start" - - // hcsStartComputeSystemContext is an async operation. Start the outer span - // here to measure the full start time. - ctx, span := oc.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - computeSystem.startTime = time.Now() - return nil -} - -// ID returns the compute system's identifier. -func (computeSystem *System) ID() string { - return computeSystem.id -} - -// Shutdown requests a compute system shutdown. -func (computeSystem *System) Shutdown(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Shutdown" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// Terminate requests a compute system terminate. -func (computeSystem *System) Terminate(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Terminate" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// waitBackground waits for the compute system exit notification. Once received -// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. 
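`GetComputeSystems` is the V1-schema enumeration path, and `Shutdown`/`Terminate` already tolerate the "already stopped" family internally. A hedged sketch that lists systems by owner and requests shutdown for each (the `shutdownAllOwnedBy` helper and the owner filter are assumptions for the example):

```go
//go:build windows

package hcstest

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/hcs"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
)

// shutdownAllOwnedBy enumerates compute systems with the given owner and asks
// each one to shut down, skipping systems that have already gone away.
func shutdownAllOwnedBy(ctx context.Context, owner string) error {
	systems, err := hcs.GetComputeSystems(ctx, schema1.ComputeSystemQuery{Owners: []string{owner}})
	if err != nil {
		return err
	}
	for _, props := range systems {
		system, err := hcs.OpenComputeSystem(ctx, props.ID)
		if err != nil {
			if hcs.IsNotExist(err) {
				continue
			}
			return err
		}
		if err := system.Shutdown(ctx); err != nil && !hcs.IsAlreadyStopped(err) {
			system.Close()
			return err
		}
		system.Close()
	}
	return nil
}
```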
-// -// This MUST be called exactly once per `computeSystem.handle` but `Wait` is -// safe to call multiple times. -func (computeSystem *System) waitBackground() { - operation := "hcs::System::waitBackground" - ctx, span := oc.StartSpan(context.Background(), operation) - defer span.End() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { - case nil: - log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: - log.G(ctx).Debug("unexpected system exit") - computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) - err = nil - default: - err = makeSystemError(computeSystem, operation, err, nil) - } - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = err - close(computeSystem.waitBlock) - }) - oc.SetSpanStatus(span, err) -} - -// Wait synchronously waits for the compute system to shutdown or terminate. If -// the compute system has already exited returns the previous error (if any). -func (computeSystem *System) Wait() error { - <-computeSystem.waitBlock - return computeSystem.waitError -} - -// ExitError returns an error describing the reason the compute system terminated. -func (computeSystem *System) ExitError() error { - select { - case <-computeSystem.waitBlock: - if computeSystem.waitError != nil { - return computeSystem.waitError - } - return computeSystem.exitError - default: - return errors.New("container not exited") - } -} - -// Properties returns the requested container properties targeting a V1 schema container. -func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Properties" - - queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &schema1.ContainerProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return properties, nil -} - -// queryInProc handles querying for container properties without reaching out to HCS. `props` -// will be updated to contain any data returned from the queries present in `types`. If any properties -// failed to be queried they will be tallied up and returned in as the first return value. Failures on -// query are NOT considered errors; the only failure case for this method is if the containers job object -// cannot be opened. -func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { - // In the future we can make use of some new functionality in the HCS that allows you - // to pass a job object for HCS to use for the container. Currently, the only way we'll - // be able to open the job/silo is if we're running as SYSTEM. 
- jobOptions := &jobobject.Options{ - UseNTVariant: true, - Name: siloNameFmt(computeSystem.id), - } - job, err := jobobject.Open(ctx, jobOptions) - if err != nil { - return nil, err - } - defer job.Close() - - var fallbackQueryTypes []hcsschema.PropertyType - for _, propType := range types { - switch propType { - case hcsschema.PTStatistics: - // Handle a bad caller asking for the same type twice. No use in re-querying if this is - // filled in already. - if props.Statistics == nil { - props.Statistics, err = computeSystem.statisticsInProc(job) - if err != nil { - log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") - - fallbackQueryTypes = append(fallbackQueryTypes, propType) - } - } - default: - fallbackQueryTypes = append(fallbackQueryTypes, propType) - } - } - - return fallbackQueryTypes, nil -} - -// statisticsInProc emulates what HCS does to grab statistics for a given container with a small -// change to make grabbing the private working set total much more efficient. -func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { - // Start timestamp for these stats before we grab them to match HCS - timestamp := time.Now() - - memInfo, err := job.QueryMemoryStats() - if err != nil { - return nil, err - } - - processorInfo, err := job.QueryProcessorStats() - if err != nil { - return nil, err - } - - storageInfo, err := job.QueryStorageStats() - if err != nil { - return nil, err - } - - // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation - // with the class SystemProcessInformation which returns an array containing system information for *every* - // process running on the machine. They then grab the pids that are running in the container and filter down - // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't - // work well as performance should get worse if more processess are running on the machine in general and not - // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored - // as well which isn't great and is wasted work to fetch. - // - // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private - // working set ourselves and ask for everything else seperately. The optimization we can make here is - // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating - // the private working set in a more efficient manner by: - // - // 1. Find the pids running in the silo - // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) - // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters - // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2. 
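The optimization described above opens the container's silo job object directly and tallies the private working set itself. A trimmed, hedged sketch of that flow; the silo-name format is the implementation detail noted earlier and should not be relied on elsewhere, and `QueryPrivateWorkingSet` is assumed to return `(uint64, error)` as its use in the surrounding code suggests:

```go
//go:build windows

package hcstest

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

// siloPrivateWorkingSet opens the job object backing a container's silo and
// returns the total private working set, i.e. the cheap in-proc path above.
func siloPrivateWorkingSet(ctx context.Context, containerID string) (uint64, error) {
	job, err := jobobject.Open(ctx, &jobobject.Options{
		UseNTVariant: true,
		Name:         fmt.Sprintf(`\Container_%s`, containerID), // mirrors siloNameFmt
	})
	if err != nil {
		return 0, err // likely not running as SYSTEM, or the silo is gone
	}
	defer job.Close()

	pws, err := job.QueryPrivateWorkingSet() // assumed (uint64, error)
	if err != nil {
		return 0, err
	}
	return pws, nil
}
```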
- privateWorkingSet, err := job.QueryPrivateWorkingSet() - if err != nil { - return nil, err - } - - return &hcsschema.Statistics{ - Timestamp: timestamp, - ContainerStartTime: computeSystem.startTime, - Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, - Memory: &hcsschema.MemoryStats{ - MemoryUsageCommitBytes: memInfo.JobMemory, - MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, - MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, - }, - Processor: &hcsschema.ProcessorStats{ - RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), - RuntimeUser100ns: uint64(processorInfo.TotalUserTime), - TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), - }, - Storage: &hcsschema.StorageStats{ - ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), - ReadSizeBytes: storageInfo.ReadStats.TotalSize, - WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), - WriteSizeBytes: storageInfo.WriteStats.TotalSize, - }, - }, nil -} - -// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types. -func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { - operation := "hcs::System::PropertiesV2" - - queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - props := &hcsschema.Properties{} - if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return props, nil -} - -// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system. -func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - // Let HCS tally up the total for VM based queries instead of querying ourselves. - if computeSystem.typ != "container" { - return computeSystem.hcsPropertiesV2Query(ctx, types) - } - - // Define a starter Properties struct with the default fields returned from every - // query. Owner is only returned from Statistics but it's harmless to include. - properties := &hcsschema.Properties{ - Id: computeSystem.id, - SystemType: computeSystem.typ, - RuntimeOsType: computeSystem.os, - Owner: computeSystem.owner, - } - - logEntry := log.G(ctx) - // First lets try and query ourselves without reaching to HCS. If any of the queries fail - // we'll take note and fallback to querying HCS for any of the failed types. 
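From a caller's point of view the in-proc/HCS-fallback split is invisible; you simply ask `PropertiesV2` for the statistics you want. A hedged sketch (the `containerMemoryUsage` helper is invented):

```go
//go:build windows

package hcstest

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// containerMemoryUsage requests V2 statistics and returns the private working
// set in bytes. PropertiesV2 chooses the in-proc fast path or the HCS fallback
// transparently.
func containerMemoryUsage(ctx context.Context, system *hcs.System) (uint64, error) {
	props, err := system.PropertiesV2(ctx, hcsschema.PTStatistics)
	if err != nil {
		return 0, err
	}
	if props.Statistics == nil || props.Statistics.Memory == nil {
		return 0, fmt.Errorf("no memory statistics returned for %s", system.ID())
	}
	return props.Statistics.Memory.MemoryUsagePrivateWorkingSetBytes, nil
}
```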
- fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) - if err == nil && len(fallbackTypes) == 0 { - return properties, nil - } else if err != nil { - logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) - fallbackTypes = types - } - - logEntry.WithFields(logrus.Fields{ - logfields.ContainerID: computeSystem.id, - "propertyTypes": fallbackTypes, - }).Info("falling back to HCS for property type queries") - - hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) - if err != nil { - return nil, err - } - - // Now add in anything that we might have successfully queried in process. - if properties.Statistics != nil { - hcsProperties.Statistics = properties.Statistics - hcsProperties.Owner = properties.Owner - } - - // For future support for querying processlist in-proc as well. - if properties.ProcessList != nil { - hcsProperties.ProcessList = properties.ProcessList - } - - return hcsProperties, nil -} - -// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Pause(ctx context.Context) (err error) { - operation := "hcs::System::Pause" - - // hcsPauseComputeSystemContext is an async peration. Start the outer span - // here to measure the full pause time. - ctx, span := oc.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Resume(ctx context.Context) (err error) { - operation := "hcs::System::Resume" - - // hcsResumeComputeSystemContext is an async operation. Start the outer span - // here to measure the full restore time. - ctx, span := oc.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Save the compute system -func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { - operation := "hcs::System::Save" - - // hcsSaveComputeSystemContext is an async operation. Start the outer span - // here to measure the full save time. 
- ctx, span := oc.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - saveOptions, err := json.Marshal(options) - if err != nil { - return err - } - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) - events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - configurationb, err := json.Marshal(c) - if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, err, nil) - } - - configuration := string(configurationb) - processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) - events := processHcsResult(ctx, resultJSON) - if err != nil { - if v2, ok := c.(*hcsschema.ProcessParameters); ok { - operation += ": " + v2.CommandLine - } else if v1, ok := c.(*schema1.ProcessConfig); ok { - operation += ": " + v1.CommandLine - } - return nil, nil, makeSystemError(computeSystem, operation, err, events) - } - - log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") - return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil -} - -// CreateProcess launches a new process within the computeSystem. -func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { - operation := "hcs::System::CreateProcess" - process, processInfo, err := computeSystem.createProcess(ctx, operation, c) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - process.Close() - } - }() - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - process.stdin = pipes[0] - process.stdout = pipes[1] - process.stderr = pipes[2] - process.hasCachedStdio = true - - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// OpenProcess gets an interface to an existing process within the computeSystem. 
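`CreateProcess` marshals whatever configuration document it is given (V1 `schema1.ProcessConfig` or V2 process parameters) and hands back a process with its stdio already cached. A hedged V1-style sketch; the command line and working directory are placeholders, and the returned `cow.Process` is assumed to expose the `Close`/`Wait`/`ExitCode` methods that `hcs.Process` implements above:

```go
//go:build windows

package hcstest

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/hcs"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
)

// runInContainer starts a command in an existing compute system, waits for it
// to exit, and returns its exit code. No stdio pipes are requested here.
func runInContainer(ctx context.Context, system *hcs.System, commandLine string) (int, error) {
	cfg := &schema1.ProcessConfig{
		CommandLine:      commandLine,
		WorkingDirectory: `C:\`,
	}

	proc, err := system.CreateProcess(ctx, cfg)
	if err != nil {
		return -1, err
	}
	defer proc.Close()

	if err := proc.Wait(); err != nil {
		return -1, err
	}
	return proc.ExitCode()
}
```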
-func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::OpenProcess" - - if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - process := newProcess(processHandle, pid, computeSystem) - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// Close cleans up any state associated with the compute system but does not terminate or wait for it. -func (computeSystem *System) Close() (err error) { - operation := "hcs::System::Close" - ctx, span := oc.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.Lock() - defer computeSystem.handleLock.Unlock() - - // Don't double free this - if computeSystem.handle == 0 { - return nil - } - - if err = computeSystem.unregisterCallback(ctx); err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) - if err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - computeSystem.handle = 0 - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = ErrAlreadyClosed - close(computeSystem.waitBlock) - }) - - return nil -} - -func (computeSystem *System) registerCallback(ctx context.Context) error { - callbackContext := &notificationWatcherContext{ - channels: newSystemChannels(), - systemID: computeSystem.id, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - return err - } - callbackContext.handle = callbackHandle - computeSystem.callbackNumber = callbackNumber - - return nil -} - -func (computeSystem *System) unregisterCallback(ctx context.Context) error { - callbackNumber := computeSystem.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // hcsUnregisterComputeSystemCallback has its own synchronization - // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
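// Illustrative sketch (standard library only; the types here are hypothetical stand-ins,
// not the hcsshim ones): registerCallback above hands HCS a plain integer token rather
// than a Go pointer, and keeps the real state in a mutex-protected map keyed by that
// token. Registration bumps a shared counter under the lock; lookup and removal take the
// same lock.

package main

import (
	"fmt"
	"sync"
)

type watcherContext struct{ systemID string }

var (
	registryLock sync.RWMutex
	registry     = map[uintptr]*watcherContext{}
	nextToken    uintptr
)

func register(systemID string) uintptr {
	registryLock.Lock()
	defer registryLock.Unlock()
	token := nextToken
	nextToken++
	registry[token] = &watcherContext{systemID: systemID}
	return token
}

func lookup(token uintptr) *watcherContext {
	registryLock.RLock()
	defer registryLock.RUnlock()
	return registry[token]
}

func unregister(token uintptr) {
	registryLock.Lock()
	defer registryLock.Unlock()
	delete(registry, token)
}

func main() {
	tok := register("vm-1")
	fmt.Println("registered:", lookup(tok).systemID)
	unregister(tok)
	fmt.Println("after unregister, gone:", lookup(tok) == nil)
}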
- err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} - -// Modify the System by sending a request to HCS -func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Modify" - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - requestBytes, err := json.Marshal(config) - if err != nil { - return err - } - - requestJSON := string(requestBytes) - resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go deleted file mode 100644 index 5dcb97eb39..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "io" - "syscall" - - "github.com/Microsoft/go-winio" - diskutil "github.com/Microsoft/go-winio/vhd" - "github.com/Microsoft/hcsshim/computestorage" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles -// if there is an error. -func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { - fs := make([]io.ReadWriteCloser, len(hs)) - for i, h := range hs { - if h != syscall.Handle(0) { - if err == nil { - fs[i], err = winio.MakeOpenFile(h) - } - if err != nil { - syscall.Close(h) - } - } - } - if err != nil { - for _, f := range fs { - if f != nil { - f.Close() - } - } - return nil, err - } - return fs, nil -} - -// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
-func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { - if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { - return errors.Wrap(err, "failed to create VHD") - } - - vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) - if err != nil { - return errors.Wrap(err, "failed to open VHD") - } - defer func() { - err2 := windows.CloseHandle(windows.Handle(vhd)) - if err == nil { - err = errors.Wrap(err2, "failed to close VHD") - } - }() - - if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { - return errors.Wrap(err, "failed to format VHD") - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go deleted file mode 100644 index 6e161e6aa1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build windows - -package hcs - -import ( - "context" - "time" - - "github.com/Microsoft/hcsshim/internal/log" -) - -func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { - events := processHcsResult(ctx, resultJSON) - if IsPending(err) { - return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) - } - - return events, err -} - -func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { - callbackMapLock.RLock() - if _, ok := callbackMap[callbackNumber]; !ok { - callbackMapLock.RUnlock() - log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") - return ErrHandleClose - } - channels := callbackMap[callbackNumber].channels - callbackMapLock.RUnlock() - - expectedChannel := channels[expectedNotification] - if expectedChannel == nil { - log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") - return ErrInvalidNotificationType - } - - var c <-chan time.Time - if timeout != nil { - timer := time.NewTimer(*timeout) - c = timer.C - defer timer.Stop() - } - - select { - case err, ok := <-expectedChannel: - if !ok { - return ErrHandleClose - } - return err - case err, ok := <-channels[hcsNotificationSystemExited]: - if !ok { - return ErrHandleClose - } - // If the expected notification is hcsNotificationSystemExited which of the two selects - // chosen is random. 
Return the raw error if hcsNotificationSystemExited is expected - if channels[hcsNotificationSystemExited] == expectedChannel { - return err - } - return ErrUnexpectedContainerExit - case _, ok := <-channels[hcsNotificationServiceDisconnect]: - if !ok { - return ErrHandleClose - } - // hcsNotificationServiceDisconnect should never be an expected notification - // it does not need the same handling as hcsNotificationSystemExited - return ErrUnexpectedProcessAbort - case <-c: - return ErrTimeout - } -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go deleted file mode 100644 index ce70676789..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go +++ /dev/null @@ -1 +0,0 @@ -package hcserror diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go deleted file mode 100644 index bad2705416..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build windows - -package hcserror - -import ( - "fmt" - "syscall" -) - -const ERROR_GEN_FAILURE = syscall.Errno(31) - -type HcsError struct { - title string - rest string - Err error -} - -func (e *HcsError) Error() string { - s := e.title - if len(s) > 0 && s[len(s)-1] != ' ' { - s += " " - } - s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) - if e.rest != "" { - if e.rest[0] != ' ' { - s += " " - } - s += e.rest - } - return s -} - -func New(err error, title, rest string) error { - // Pass through DLL errors directly since they do not originate from HCS. - if _, ok := err.(*syscall.DLLError); ok { - return err - } - return &HcsError{title, rest, err} -} - -func Win32FromError(err error) uint32 { - if herr, ok := err.(*HcsError); ok { - return Win32FromError(herr.Err) - } - if code, ok := err.(syscall.Errno); ok { - return uint32(code) - } - return uint32(ERROR_GEN_FAILURE) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/clone.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/clone.go deleted file mode 100644 index 5a8a5fa5d0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/clone.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" -) - -// Usually mounts specified in the container config are added in the container doc -// that is passed along with the container creation reuqest. However, for cloned containers -// we don't send any create container request so we must add the mounts one by one by -// doing Modify requests to that container. 
-func addMountsToClone(ctx context.Context, c cow.Container, mounts *mountsConfig) error { - // TODO(ambarve) : Find out if there is a way to send request for all the mounts - // at the same time to save time - for _, md := range mounts.mdsv2 { - requestDocument := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: resourcepaths.SiloMappedDirectoryResourcePath, - Settings: md, - } - err := c.Modify(ctx, requestDocument) - if err != nil { - return fmt.Errorf("error while adding mapped directory (%s) to the container: %s", md.HostPath, err) - } - } - - for _, mp := range mounts.mpsv2 { - requestDocument := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: resourcepaths.SiloMappedPipeResourcePath, - Settings: mp, - } - err := c.Modify(ctx, requestDocument) - if err != nil { - return fmt.Errorf("error while adding mapped pipe (%s) to the container: %s", mp.HostPath, err) - } - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go deleted file mode 100644 index 7b3b7ef983..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go +++ /dev/null @@ -1,476 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "strconv" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/clone" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oci" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/schemaversion" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/pkg/annotations" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -var ( - lcowRootInUVM = guestpath.LCOWRootPrefixInUVM + "/%s" - wcowRootInUVM = guestpath.WCOWRootPrefixInUVM + "/%s" -) - -// CreateOptions are the set of fields used to call CreateContainer(). -// Note: In the spec, the LayerFolders must be arranged in the same way in which -// moby configures them: layern, layern-1,...,layer2,layer1,scratch -// where layer1 is the base read-only layer, layern is the top-most read-only -// layer, and scratch is the RW layer. This is for historical reasons only. -type CreateOptions struct { - // Common parameters - ID string // Identifier for the container - Owner string // Specifies the owner. Defaults to executable name. - Spec *specs.Spec // Definition of the container or utility VM being created - SchemaVersion *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4 - HostingSystem *uvm.UtilityVM // Utility or service VM in which the container is to be created. - NetworkNamespace string // Host network namespace to use (overrides anything in the spec) - - // This is an advanced debugging parameter. It allows for diagnosability by leaving a containers - // resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag - // to look at the state of a utility VM to see what resources were allocated. 
Obviously the caller - // must a) not tear down the utility VM on failure (or pause in some way) and b) is responsible for - // performing the ReleaseResources() call themselves. - DoNotReleaseResourcesOnFailure bool - - // ScaleCPULimitsToSandbox indicates that the container CPU limits should be adjusted to account - // for the difference in CPU count between the host and the UVM. - ScaleCPULimitsToSandbox bool -} - -// createOptionsInternal is the set of user-supplied create options, but includes internal -// fields for processing the request once user-supplied stuff has been validated. -type createOptionsInternal struct { - *CreateOptions - - actualSchemaVersion *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override - actualID string // Identifier for the container - actualOwner string // Owner for the container - actualNetworkNamespace string - ccgState *hcsschema.ContainerCredentialGuardState // Container Credential Guard information to be attached to HCS container document - isTemplate bool // Are we going to save this container as a template - templateID string // Template ID of the template from which this container is being cloned -} - -// compares two slices of strings and returns true if they are same, returns false otherwise. -// The elements in the slices don't have to be in the same order for them to be equal. -func cmpSlices(s1, s2 []string) bool { - equal := (len(s1) == len(s2)) - for i := 0; equal && i < len(s1); i++ { - found := false - for j := 0; !found && j < len(s2); j++ { - found = (s1[i] == s2[j]) - } - equal = equal && found - } - return equal -} - -// verifyCloneContainerSpecs compares the container creation spec provided during the template container -// creation and the spec provided during cloned container creation and checks that all the fields match -// (except for the certain fields that are allowed to be different). -func verifyCloneContainerSpecs(templateSpec, cloneSpec *specs.Spec) error { - // Following fields can be different in the template and clone specs. - // 1. Process - // 2. Annotations - Only the template/cloning related annotations can be different. - // 3. Windows.LayerFolders - Only the last i.e scratch layer can be different. - - if templateSpec.Version != cloneSpec.Version { - return fmt.Errorf("OCI Runtime Spec version of template (%s) doesn't match with the Spec version of clone (%s)", templateSpec.Version, cloneSpec.Version) - } - - // for annotations check that the values of memory & cpu annotations are same - if templateSpec.Annotations[annotations.ContainerMemorySizeInMB] != cloneSpec.Annotations[annotations.ContainerMemorySizeInMB] { - return errors.New("memory size limit for template and clone containers can not be different") - } - if templateSpec.Annotations[annotations.ContainerProcessorCount] != cloneSpec.Annotations[annotations.ContainerProcessorCount] { - return errors.New("processor count for template and clone containers can not be different") - } - if templateSpec.Annotations[annotations.ContainerProcessorLimit] != cloneSpec.Annotations[annotations.ContainerProcessorLimit] { - return errors.New("processor limit for template and clone containers can not be different") - } - - // LayerFolders should be identical except for the last element. 
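// Illustrative sketch (standard library only; not the shipped helper): an order-independent
// comparison like cmpSlices above can also be done in a single pass with a map of counts,
// which additionally distinguishes duplicate entries that the nested-loop version treats
// as equal.

package main

import "fmt"

func sameElements(s1, s2 []string) bool {
	if len(s1) != len(s2) {
		return false
	}
	counts := make(map[string]int, len(s1))
	for _, v := range s1 {
		counts[v]++
	}
	for _, v := range s2 {
		counts[v]--
		if counts[v] < 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(sameElements([]string{"layer2", "layer1"}, []string{"layer1", "layer2"})) // true
	fmt.Println(sameElements([]string{"layer1"}, []string{"layer2"}))                     // false
}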
- if !cmpSlices(templateSpec.Windows.LayerFolders[:len(templateSpec.Windows.LayerFolders)-1], cloneSpec.Windows.LayerFolders[:len(cloneSpec.Windows.LayerFolders)-1]) { - return errors.New("layers provided for template container and clone container don't match. Check the image specified in container config") - } - - if !reflect.DeepEqual(templateSpec.Windows.HyperV, cloneSpec.Windows.HyperV) { - return errors.New("HyperV spec for template and clone containers can not be different") - } - - if templateSpec.Windows.Network.AllowUnqualifiedDNSQuery != cloneSpec.Windows.Network.AllowUnqualifiedDNSQuery { - return errors.New("different values for allow unqualified DNS query can not be provided for template and clones") - } - if templateSpec.Windows.Network.NetworkSharedContainerName != cloneSpec.Windows.Network.NetworkSharedContainerName { - return errors.New("different network shared name can not be provided for template and clones") - } - if !cmpSlices(templateSpec.Windows.Network.DNSSearchList, cloneSpec.Windows.Network.DNSSearchList) { - return errors.New("different DNS search list can not be provided for template and clones") - } - - return nil -} - -func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) error { - if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate && !coi.isTemplate { - return fmt.Errorf("only a template container can be created inside a template pod. Any other combination is not valid") - } - - if coi.HostingSystem != nil && coi.templateID != "" && !coi.HostingSystem.IsClone { - return fmt.Errorf("a container can not be cloned inside a non cloned POD") - } - - if coi.templateID != "" { - // verify that the configurations provided for the template for - // this clone are same. - tc, err := clone.FetchTemplateConfig(ctx, coi.HostingSystem.TemplateID) - if err != nil { - return fmt.Errorf("config validation failed : %s", err) - } - if err := verifyCloneContainerSpecs(&tc.TemplateContainerSpec, coi.Spec); err != nil { - return err - } - } - - if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate { - if len(coi.Spec.Windows.Devices) != 0 { - return fmt.Errorf("mapped Devices are not supported for template containers") - } - - if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - return fmt.Errorf("gmsa specifications are not supported for template containers") - } - - if coi.Spec.Windows.Servicing { - return fmt.Errorf("template containers can't be started in servicing mode") - } - - // check that no mounts are specified. - if len(coi.Spec.Mounts) > 0 { - return fmt.Errorf("user specified mounts are not permitted for template containers") - } - } - - // check if gMSA is disabled - if coi.Spec.Windows != nil { - disableGMSA := oci.ParseAnnotationsDisableGMSA(ctx, coi.Spec) - if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok && disableGMSA { - return fmt.Errorf("gMSA credentials are disabled: %w", hcs.ErrOperationDenied) - } - } - - return nil -} - -func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions) (*createOptionsInternal, error) { - coi := &createOptionsInternal{ - CreateOptions: createOptions, - actualID: createOptions.ID, - actualOwner: createOptions.Owner, - } - - if coi.Spec == nil { - return nil, fmt.Errorf("spec must be supplied") - } - - // Defaults if omitted by caller. 
- if coi.actualID == "" { - g, err := guid.NewV4() - if err != nil { - return nil, err - } - coi.actualID = g.String() - } - if coi.actualOwner == "" { - coi.actualOwner = filepath.Base(os.Args[0]) - } - - if coi.HostingSystem != nil { - // By definition, a hosting system can only be supplied for a v2 Xenon. - coi.actualSchemaVersion = schemaversion.SchemaV21() - } else { - coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion) - } - - coi.isTemplate = oci.ParseAnnotationsSaveAsTemplate(ctx, createOptions.Spec) - coi.templateID = oci.ParseAnnotationsTemplateID(ctx, createOptions.Spec) - - log.G(ctx).WithFields(logrus.Fields{ - "options": fmt.Sprintf("%+v", createOptions), - "schema": coi.actualSchemaVersion, - }).Debug("hcsshim::initializeCreateOptions") - - return coi, nil -} - -// configureSandboxNetwork creates a new network namespace for the pod (sandbox) -// if required and then adds that namespace to the pod. -func configureSandboxNetwork(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, ct oci.KubernetesContainerType) error { - if coi.NetworkNamespace != "" { - r.SetNetNS(coi.NetworkNamespace) - } else { - err := createNetworkNamespace(ctx, coi, r) - if err != nil { - return err - } - } - coi.actualNetworkNamespace = r.NetNS() - - if coi.HostingSystem != nil { - // Only add the network namespace to a standalone or sandbox - // container but not a workload container in a sandbox that inherits - // the namespace. - if ct == oci.KubernetesContainerTypeNone || ct == oci.KubernetesContainerTypeSandbox { - if err := coi.HostingSystem.ConfigureNetworking(ctx, coi.actualNetworkNamespace); err != nil { - // No network setup type was specified for this UVM. Create and assign one here unless - // we received a different error. - if err == uvm.ErrNoNetworkSetup { - if err := coi.HostingSystem.CreateAndAssignNetworkSetup(ctx, "", ""); err != nil { - return err - } - if err := coi.HostingSystem.ConfigureNetworking(ctx, coi.actualNetworkNamespace); err != nil { - return err - } - } else { - return err - } - } - r.SetAddedNetNSToVM(true) - } - } - - return nil -} - -// CreateContainer creates a container. It can cope with a wide variety of -// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema -// calls. Note we always return the resources that have been allocated, even in the -// case of an error. This provides support for the debugging option not to -// release the resources on failure, so that the client can make the necessary -// call to release resources that have been allocated as part of calling this function. -func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.Container, _ *resources.Resources, err error) { - coi, err := initializeCreateOptions(ctx, createOptions) - if err != nil { - return nil, nil, err - } - - if err := validateContainerConfig(ctx, coi); err != nil { - return nil, nil, fmt.Errorf("container config validation failed: %s", err) - } - - r := resources.NewContainerResources(createOptions.ID) - defer func() { - if err != nil { - if !coi.DoNotReleaseResourcesOnFailure { - _ = resources.ReleaseResources(ctx, r, coi.HostingSystem, true) - } - } - }() - - if coi.HostingSystem != nil { - if coi.Spec.Linux != nil { - r.SetContainerRootInUVM(fmt.Sprintf(lcowRootInUVM, createOptions.ID)) - } else { - n := coi.HostingSystem.ContainerCounter() - r.SetContainerRootInUVM(fmt.Sprintf(wcowRootInUVM, strconv.FormatUint(n, 16))) - } - // install kernel drivers if necessary. 
- // do this before network setup in case any of the drivers requested are - // network drivers - driverClosers, err := installPodDrivers(ctx, coi.HostingSystem, coi.Spec.Annotations) - if err != nil { - return nil, r, err - } - r.Add(driverClosers...) - } - - ct, _, err := oci.GetSandboxTypeAndID(coi.Spec.Annotations) - if err != nil { - return nil, r, err - } - isSandbox := ct == oci.KubernetesContainerTypeSandbox - - // Create a network namespace if necessary. - if coi.Spec.Windows != nil && - coi.Spec.Windows.Network != nil && - schemaversion.IsV21(coi.actualSchemaVersion) { - err = configureSandboxNetwork(ctx, coi, r, ct) - if err != nil { - return nil, r, fmt.Errorf("failure while creating namespace for container: %s", err) - } - } - - var hcsDocument, gcsDocument interface{} - log.G(ctx).Debug("hcsshim::CreateContainer allocating resources") - if coi.Spec.Linux != nil { - if schemaversion.IsV10(coi.actualSchemaVersion) { - return nil, r, errors.New("LCOW v1 not supported") - } - log.G(ctx).Debug("hcsshim::CreateContainer allocateLinuxResources") - err = allocateLinuxResources(ctx, coi, r, isSandbox) - if err != nil { - log.G(ctx).WithError(err).Debug("failed to allocateLinuxResources") - return nil, r, err - } - gcsDocument, err = createLinuxContainerDocument(ctx, coi, r.ContainerRootInUVM(), r.LcowScratchPath()) - if err != nil { - log.G(ctx).WithError(err).Debug("failed createHCSContainerDocument") - return nil, r, err - } - } else { - err = allocateWindowsResources(ctx, coi, r, isSandbox) - if err != nil { - log.G(ctx).WithError(err).Debug("failed to allocateWindowsResources") - return nil, r, err - } - log.G(ctx).Debug("hcsshim::CreateContainer creating container document") - v1, v2, err := createWindowsContainerDocument(ctx, coi) - if err != nil { - log.G(ctx).WithError(err).Debug("failed createHCSContainerDocument") - return nil, r, err - } - - if schemaversion.IsV10(coi.actualSchemaVersion) { - // v1 Argon or Xenon. Pass the document directly to HCS. - hcsDocument = v1 - } else if coi.HostingSystem != nil { - // v2 Xenon. Pass the container object to the UVM. - gcsDocument = &hcsschema.HostedSystem{ - SchemaVersion: schemaversion.SchemaV21(), - Container: v2, - } - } else { - // v2 Argon. Pass the container object to the HCS. - hcsDocument = &hcsschema.ComputeSystem{ - Owner: coi.actualOwner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - Container: v2, - } - } - } - - log.G(ctx).Debug("hcsshim::CreateContainer creating compute system") - if gcsDocument != nil { - c, err := coi.HostingSystem.CreateContainer(ctx, coi.actualID, gcsDocument) - if err != nil { - return nil, r, err - } - return c, r, nil - } - - system, err := hcs.CreateComputeSystem(ctx, coi.actualID, hcsDocument) - if err != nil { - return nil, r, err - } - return system, r, nil -} - -// CloneContainer is similar to CreateContainer but it does not add layers or namespace like -// CreateContainer does. Also, instead of sending create container request it sends a modify -// request to an existing container. CloneContainer only works for WCOW. 
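// Illustrative sketch (standard library only; tracker and releaseAll are hypothetical
// stand-ins): as documented above, resources are returned even when creation fails, and
// the deferred release is skipped when DoNotReleaseResourcesOnFailure is set. A caller
// therefore typically releases whatever was allocated on error unless it deliberately
// keeps it around for debugging.

package main

import (
	"errors"
	"fmt"
)

type tracker struct{ allocated []string }

func (t *tracker) releaseAll() {
	for i := len(t.allocated) - 1; i >= 0; i-- { // release in reverse allocation order
		fmt.Println("released", t.allocated[i])
	}
	t.allocated = nil
}

// createContainer fails partway through, but still hands back the tracker so the
// caller can decide what to do with the partially allocated state.
func createContainer() (*tracker, error) {
	t := &tracker{}
	t.allocated = append(t.allocated, "network namespace", "scratch layer")
	return t, errors.New("create failed after partial allocation")
}

func main() {
	keepForDebug := false
	res, err := createContainer()
	if err != nil && !keepForDebug {
		res.releaseAll() // mirrors the resources.ReleaseResources call in the real caller
	}
	fmt.Println("creation failed:", err)
}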
-func CloneContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.Container, _ *resources.Resources, err error) { - coi, err := initializeCreateOptions(ctx, createOptions) - if err != nil { - return nil, nil, err - } - - if err := validateContainerConfig(ctx, coi); err != nil { - return nil, nil, err - } - - if coi.Spec.Windows == nil || coi.HostingSystem == nil { - return nil, nil, fmt.Errorf("CloneContainer is only supported for Hyper-v isolated WCOW ") - } - - r := resources.NewContainerResources(createOptions.ID) - defer func() { - if err != nil { - if !coi.DoNotReleaseResourcesOnFailure { - _ = resources.ReleaseResources(ctx, r, coi.HostingSystem, true) - } - } - }() - - if coi.HostingSystem != nil { - n := coi.HostingSystem.ContainerCounter() - if coi.Spec.Linux != nil { - r.SetContainerRootInUVM(fmt.Sprintf(lcowRootInUVM, createOptions.ID)) - } else { - r.SetContainerRootInUVM(fmt.Sprintf(wcowRootInUVM, strconv.FormatUint(n, 16))) - } - } - - if err = setupMounts(ctx, coi, r); err != nil { - return nil, r, err - } - - mounts, err := createMountsConfig(ctx, coi) - if err != nil { - return nil, r, err - } - - c, err := coi.HostingSystem.CloneContainer(ctx, coi.actualID) - if err != nil { - return nil, r, err - } - - // Everything that is usually added to the container during the createContainer - // request (via the gcsDocument) must be hot added here. - if err := addMountsToClone(ctx, c, mounts); err != nil { - return nil, r, err - } - - return c, r, nil -} - -// isV2Xenon returns true if the create options are for a HCS schema V2 xenon container -// with a hosting VM -func (coi *createOptionsInternal) isV2Xenon() bool { - return schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem != nil -} - -// isV1Xenon returns true if the create options are for a HCS schema V1 xenon container -// with a hosting VM -func (coi *createOptionsInternal) isV1Xenon() bool { - return schemaversion.IsV10(coi.actualSchemaVersion) && coi.HostingSystem != nil -} - -// isV2Argon returns true if the create options are for a HCS schema V2 argon container -// which should have no hosting VM -func (coi *createOptionsInternal) isV2Argon() bool { - return schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil -} - -// isV1Argon returns true if the create options are for a HCS schema V1 argon container -// which should have no hyperv settings -func (coi *createOptionsInternal) isV1Argon() bool { - return schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil -} - -func (coi *createOptionsInternal) hasWindowsAssignedDevices() bool { - return (coi.Spec.Windows != nil) && (coi.Spec.Windows.Devices != nil) && - (len(coi.Spec.Windows.Devices) > 0) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go deleted file mode 100644 index ccc19d4af8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go +++ /dev/null @@ -1,278 +0,0 @@ -//go:build windows - -package hcsoci - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - - "github.com/Microsoft/hcsshim/internal/devices" - "github.com/Microsoft/hcsshim/internal/guestpath" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oci" - 
"github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/osversion" - "github.com/Microsoft/hcsshim/pkg/annotations" -) - -const deviceUtilExeName = "device-util.exe" - -// getSpecKernelDrivers gets any device drivers specified on the spec. -// Drivers are optional, therefore do not return an error if none are on the spec. -func getSpecKernelDrivers(annots map[string]string) ([]string, error) { - drivers := oci.ParseAnnotationCommaSeparated(annotations.VirtualMachineKernelDrivers, annots) - for _, driver := range drivers { - if _, err := os.Stat(driver); err != nil { - return nil, errors.Wrapf(err, "failed to find path to drivers at %s", driver) - } - } - return drivers, nil -} - -// getDeviceExtensionPaths gets any device extensions paths specified on the spec. -// device extensions are optional, therefore if none are on the spec, do not return an error. -func getDeviceExtensionPaths(annots map[string]string) ([]string, error) { - extensions := oci.ParseAnnotationCommaSeparated(annotations.DeviceExtensions, annots) - for _, ext := range extensions { - if _, err := os.Stat(ext); err != nil { - return nil, errors.Wrapf(err, "failed to find path to driver extensions at %s", ext) - } - } - return extensions, nil -} - -// getGPUVHDPath gets the gpu vhd path from the shim options or uses the default if no -// shim option is set. Right now we only support Nvidia gpus, so this will default to -// a gpu vhd with nvidia files -func getGPUVHDPath(annot map[string]string) (string, error) { - gpuVHDPath, ok := annot[annotations.GPUVHDPath] - if !ok || gpuVHDPath == "" { - return "", errors.New("no gpu vhd specified") - } - if _, err := os.Stat(gpuVHDPath); err != nil { - return "", errors.Wrapf(err, "failed to find gpu support vhd %s", gpuVHDPath) - } - return gpuVHDPath, nil -} - -// getDeviceUtilHostPath is a simple helper function to find the host path of the device-util tool -func getDeviceUtilHostPath() string { - return filepath.Join(filepath.Dir(os.Args[0]), deviceUtilExeName) -} - -func isDeviceExtensionsSupported() bool { - // device extensions support was added from 20348 onwards. - return osversion.Build() >= 20348 -} - -// getDeviceExtensions is a helper function to read the files at `extensionPaths` and unmarshal the contents -// into a `hcsshema.DeviceExtension` to be added to a container's hcs create document. 
-func getDeviceExtensions(annotations map[string]string) (*hcsschema.ContainerDefinitionDevice, error) { - extensionPaths, err := getDeviceExtensionPaths(annotations) - if err != nil { - return nil, err - } - - if len(extensionPaths) == 0 { - return nil, nil - } - - if !isDeviceExtensionsSupported() { - return nil, fmt.Errorf("device extensions are not supported on this build (%d)", osversion.Build()) - } - - results := &hcsschema.ContainerDefinitionDevice{ - DeviceExtension: []hcsschema.DeviceExtension{}, - } - for _, extensionPath := range extensionPaths { - data, err := ioutil.ReadFile(extensionPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to read extension file at %s", extensionPath) - } - extension := hcsschema.DeviceExtension{} - if err := json.Unmarshal(data, &extension); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal extension file at %s", extensionPath) - } - results.DeviceExtension = append(results.DeviceExtension, extension) - } - return results, nil -} - -// handleAssignedDevicesWindows does all of the work to setup the hosting UVM, assign in devices -// specified on the spec, and install any necessary, specified kernel drivers into the UVM. -// -// Drivers must be installed after the target devices are assigned into the UVM. -// This ordering allows us to guarantee that driver installation on a device in the UVM is completed -// before we attempt to create a container. -func handleAssignedDevicesWindows( - ctx context.Context, - vm *uvm.UtilityVM, - annotations map[string]string, - specDevs []specs.WindowsDevice) (resultDevs []specs.WindowsDevice, closers []resources.ResourceCloser, err error) { - defer func() { - if err != nil { - // best effort clean up allocated resources on failure - for _, r := range closers { - if releaseErr := r.Release(ctx); releaseErr != nil { - log.G(ctx).WithError(releaseErr).Error("failed to release container resource") - } - } - closers = nil - resultDevs = nil - } - }() - - // install the device util tool in the UVM - toolHostPath := getDeviceUtilHostPath() - options := vm.DefaultVSMBOptions(true) - toolsShare, err := vm.AddVSMB(ctx, toolHostPath, options) - if err != nil { - return nil, closers, fmt.Errorf("failed to add VSMB share to utility VM for path %+v: %s", toolHostPath, err) - } - closers = append(closers, toolsShare) - deviceUtilPath, err := vm.GetVSMBUvmPath(ctx, toolHostPath, true) - if err != nil { - return nil, closers, err - } - - // assign device into UVM and create corresponding spec windows devices - for _, d := range specDevs { - pciID, index := getDeviceInfoFromPath(d.ID) - vpciCloser, locationPaths, err := devices.AddDevice(ctx, vm, d.IDType, pciID, index, deviceUtilPath) - if err != nil { - return nil, nil, err - } - closers = append(closers, vpciCloser) - for _, value := range locationPaths { - specDev := specs.WindowsDevice{ - ID: value, - IDType: uvm.VPCILocationPathIDType, - } - log.G(ctx).WithField("parsed devices", specDev).Info("added windows device to spec") - resultDevs = append(resultDevs, specDev) - } - } - - return resultDevs, closers, nil -} - -// handleAssignedDevicesLCOW does all of the work to setup the hosting UVM, assign in devices -// specified on the spec -// -// For LCOW, drivers must be installed before the target devices are assigned into the UVM so they -// can be linked on arrival. 
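// Illustrative sketch (standard library only; closer and attachDevice are hypothetical
// stand-ins): both device-assignment helpers above collect every closer they create and,
// if a later step fails, walk that slice in a deferred best-effort cleanup before
// returning the error. It is the same close-on-failure idea used by CreateProcess
// earlier, scaled up to a slice of resources.

package main

import (
	"errors"
	"fmt"
)

type closer interface{ release() }

type device string

func (d device) release() { fmt.Println("released", string(d)) }

func attachDevice(name string, fail bool) (closer, error) {
	if fail {
		return nil, errors.New("assign failed: " + name)
	}
	return device(name), nil
}

func assignAll(names []string) (closers []closer, err error) {
	defer func() {
		if err != nil { // best-effort cleanup of everything assigned so far
			for _, c := range closers {
				c.release()
			}
			closers = nil
		}
	}()
	for i, n := range names {
		c, cerr := attachDevice(n, i == 2) // simulate a failure on the third device
		if cerr != nil {
			return closers, cerr
		}
		closers = append(closers, c)
	}
	return closers, nil
}

func main() {
	_, err := assignAll([]string{"gpu0", "gpu1", "nic0"})
	fmt.Println("error:", err)
}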
-func handleAssignedDevicesLCOW( - ctx context.Context, - vm *uvm.UtilityVM, - annotations map[string]string, - specDevs []specs.WindowsDevice) (resultDevs []specs.WindowsDevice, closers []resources.ResourceCloser, err error) { - defer func() { - if err != nil { - // best effort clean up allocated resources on failure - for _, r := range closers { - if releaseErr := r.Release(ctx); releaseErr != nil { - log.G(ctx).WithError(releaseErr).Error("failed to release container resource") - } - } - closers = nil - resultDevs = nil - } - }() - - gpuPresent := false - - // assign device into UVM and create corresponding spec windows devices - for _, d := range specDevs { - switch d.IDType { - case uvm.VPCIDeviceIDType, uvm.VPCIDeviceIDTypeLegacy, uvm.GPUDeviceIDType: - gpuPresent = gpuPresent || d.IDType == uvm.GPUDeviceIDType - pciID, index := getDeviceInfoFromPath(d.ID) - vpci, err := vm.AssignDevice(ctx, pciID, index, "") - if err != nil { - return resultDevs, closers, errors.Wrapf(err, "failed to assign device %s, function %d to pod %s", pciID, index, vm.ID()) - } - closers = append(closers, vpci) - - // update device ID on the spec to the assigned device's resulting vmbus guid so gcs knows which devices to - // map into the container - d.ID = vpci.VMBusGUID - resultDevs = append(resultDevs, d) - default: - return resultDevs, closers, errors.Errorf("specified device %s has unsupported type %s", d.ID, d.IDType) - } - } - - if gpuPresent { - gpuSupportVhdPath, err := getGPUVHDPath(annotations) - if err != nil { - return resultDevs, closers, errors.Wrapf(err, "failed to add gpu vhd to %v", vm.ID()) - } - // use lcowNvidiaMountPath since we only support nvidia gpus right now - // must use scsi here since DDA'ing a hyper-v pci device is not supported on VMs that have ANY virtual memory - // gpuvhd must be granted VM Group access. 
- options := []string{"ro"} - scsiMount, err := vm.AddSCSI( - ctx, - gpuSupportVhdPath, - guestpath.LCOWNvidiaMountPath, - true, - false, - options, - uvm.VMAccessTypeNoop, - ) - if err != nil { - return resultDevs, closers, errors.Wrapf(err, "failed to add scsi device %s in the UVM %s at %s", gpuSupportVhdPath, vm.ID(), guestpath.LCOWNvidiaMountPath) - } - closers = append(closers, scsiMount) - } - - return resultDevs, closers, nil -} - -func installPodDrivers(ctx context.Context, vm *uvm.UtilityVM, annotations map[string]string) (closers []resources.ResourceCloser, err error) { - defer func() { - if err != nil { - // best effort clean up allocated resources on failure - for _, r := range closers { - if releaseErr := r.Release(ctx); releaseErr != nil { - log.G(ctx).WithError(releaseErr).Error("failed to release container resource") - } - } - } - }() - - // get the spec specified kernel drivers and install them on the UVM - drivers, err := getSpecKernelDrivers(annotations) - if err != nil { - return closers, err - } - for _, d := range drivers { - driverCloser, err := devices.InstallKernelDriver(ctx, vm, d) - if err != nil { - return closers, err - } - closers = append(closers, driverCloser) - } - return closers, err -} - -func getDeviceInfoFromPath(rawDevicePath string) (string, uint16) { - indexString := filepath.Base(rawDevicePath) - index, err := strconv.ParseUint(indexString, 10, 16) - if err == nil { - // we have a vf index - return filepath.Dir(rawDevicePath), uint16(index) - } - // otherwise, just use default index and full device ID given - return rawDevicePath, 0 -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/doc.go deleted file mode 100644 index b4b2ac611b..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/doc.go +++ /dev/null @@ -1 +0,0 @@ -package hcsoci diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go deleted file mode 100644 index 53b0de54b3..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go +++ /dev/null @@ -1,99 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -import ( - "context" - "encoding/json" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/schemaversion" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) { - // Remarshal the spec to perform a deep copy. - j, err := json.Marshal(coi.Spec) - if err != nil { - return nil, err - } - spec := &specs.Spec{} - err = json.Unmarshal(j, spec) - if err != nil { - return nil, err - } - - // Linux containers don't care about Windows aspects of the spec except the - // network namespace and windows devices - spec.Windows = nil - if coi.Spec.Windows != nil { - setWindowsNetworkNamespace(coi, spec) - setWindowsDevices(coi, spec) - } - - // Hooks are not supported (they should be run in the host) - spec.Hooks = nil - - // Clear unsupported features - spec.Linux.CgroupsPath = "" // GCS controls its cgroups hierarchy on its own. 
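// Illustrative sketch (standard library only; spec here is a stripped-down placeholder,
// not the OCI type): createLCOWSpec above deep-copies the runtime spec by marshalling it
// to JSON and unmarshalling into a fresh value, so fields such as Windows and Hooks can
// be cleared for the guest without mutating the caller's copy. This round-trips only
// exported, JSON-visible fields.

package main

import (
	"encoding/json"
	"fmt"
)

type spec struct {
	Hostname string            `json:"hostname"`
	Env      map[string]string `json:"env"`
}

func deepCopy(in *spec) (*spec, error) {
	raw, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := &spec{}
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := &spec{Hostname: "lcow", Env: map[string]string{"PATH": "/usr/bin"}}
	copied, _ := deepCopy(orig)
	copied.Env["PATH"] = "/opt/bin" // does not touch orig's map
	fmt.Println(orig.Env["PATH"], copied.Env["PATH"])
}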
- if spec.Linux.Resources != nil { - spec.Linux.Resources.Devices = nil - spec.Linux.Resources.Pids = nil - spec.Linux.Resources.BlockIO = nil - spec.Linux.Resources.HugepageLimits = nil - spec.Linux.Resources.Network = nil - } - spec.Linux.Seccomp = nil - - return spec, nil -} - -func setWindowsNetworkNamespace(coi *createOptionsInternal, spec *specs.Spec) { - if coi.Spec.Windows.Network != nil && - coi.Spec.Windows.Network.NetworkNamespace != "" { - if spec.Windows == nil { - spec.Windows = &specs.Windows{} - } - spec.Windows.Network = &specs.WindowsNetwork{ - NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace, - } - } -} - -func setWindowsDevices(coi *createOptionsInternal, spec *specs.Spec) { - if coi.Spec.Windows.Devices != nil { - if spec.Windows == nil { - spec.Windows = &specs.Windows{} - } - spec.Windows.Devices = coi.Spec.Windows.Devices - } -} - -type linuxHostedSystem struct { - SchemaVersion *hcsschema.Version - OciBundlePath string - OciSpecification *specs.Spec - - // ScratchDirPath represents the path inside the UVM at which the container scratch - // directory is present. Usually, this is the path at which the container scratch - // VHD is mounted inside the UVM. But in case of scratch sharing this is a - // directory under the UVM scratch directory. - ScratchDirPath string -} - -func createLinuxContainerDocument(ctx context.Context, coi *createOptionsInternal, guestRoot, scratchPath string) (*linuxHostedSystem, error) { - spec, err := createLCOWSpec(coi) - if err != nil { - return nil, err - } - - log.G(ctx).WithField("guestRoot", guestRoot).Debug("hcsshim::createLinuxContainerDoc") - return &linuxHostedSystem{ - SchemaVersion: schemaversion.SchemaV21(), - OciBundlePath: guestRoot, - OciSpecification: spec, - ScratchDirPath: scratchPath, - }, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go deleted file mode 100644 index 1d25b44438..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go +++ /dev/null @@ -1,485 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "regexp" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/layers" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oci" - "github.com/Microsoft/hcsshim/internal/processorinfo" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/internal/uvmfolder" - "github.com/Microsoft/hcsshim/internal/wclayer" - "github.com/Microsoft/hcsshim/osversion" - "github.com/Microsoft/hcsshim/pkg/annotations" -) - -// A simple wrapper struct around the container mount configs that should be added to the -// container. -type mountsConfig struct { - mdsv1 []schema1.MappedDir - mpsv1 []schema1.MappedPipe - mdsv2 []hcsschema.MappedDirectory - mpsv2 []hcsschema.MappedPipe -} - -func createMountsConfig(ctx context.Context, coi *createOptionsInternal) (*mountsConfig, error) { - // Add the mounts as mapped directories or mapped pipes - // TODO: Mapped pipes to add in v2 schema. 
- var config mountsConfig - for _, mount := range coi.Spec.Mounts { - if uvm.IsPipe(mount.Source) { - src, dst := uvm.GetContainerPipeMapping(coi.HostingSystem, mount) - config.mpsv1 = append(config.mpsv1, schema1.MappedPipe{HostPath: src, ContainerPipeName: dst}) - config.mpsv2 = append(config.mpsv2, hcsschema.MappedPipe{HostPath: src, ContainerPipeName: dst}) - } else { - readOnly := false - for _, o := range mount.Options { - if strings.ToLower(o) == "ro" { - readOnly = true - } - } - mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly} - mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly} - if coi.HostingSystem == nil { - // HCS has a bug where it does not correctly resolve file (not dir) paths - // if the path includes a symlink. Therefore, we resolve the path here before - // passing it in. The issue does not occur with VSMB, so don't need to worry - // about the isolated case. - src, err := filepath.EvalSymlinks(mount.Source) - if err != nil { - return nil, fmt.Errorf("failed to eval symlinks for mount source %q: %s", mount.Source, err) - } - mdv2.HostPath = src - } else if mount.Type == "virtual-disk" || mount.Type == "physical-disk" || mount.Type == "extensible-virtual-disk" { - mountPath := mount.Source - var err error - if mount.Type == "extensible-virtual-disk" { - _, mountPath, err = uvm.ParseExtensibleVirtualDiskPath(mount.Source) - if err != nil { - return nil, err - } - } - uvmPath, err := coi.HostingSystem.GetScsiUvmPath(ctx, mountPath) - if err != nil { - return nil, err - } - mdv2.HostPath = uvmPath - } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { - // Convert to the path in the guest that was asked for. - mdv2.HostPath = convertToWCOWSandboxMountPath(mount.Source) - } else { - // vsmb mount - uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(ctx, mount.Source, readOnly) - if err != nil { - return nil, err - } - mdv2.HostPath = uvmPath - } - config.mdsv1 = append(config.mdsv1, mdv1) - config.mdsv2 = append(config.mdsv2, mdv2) - } - } - return &config, nil -} - -// ConvertCPULimits handles the logic of converting and validating the containers CPU limits -// specified in the OCI spec to what HCS expects. -// -// `cid` is the container's ID. -// -// `vmid` is the Utility VM's ID if the container we're constructing is going to belong to -// one. -// -// `spec` is the OCI spec for the container. -// -// `maxCPUCount` is the maximum cpu count allowed for the container. This value should -// be the number of processors on the host, or in the case of a hypervisor isolated container -// the number of processors assigned to the guest/Utility VM. -// -// Returns the cpu count, cpu limit, and cpu weight in this order. Returns an error if more than one of -// cpu count, cpu limit, or cpu weight was specified in the OCI spec as they are mutually -// exclusive. 
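// Illustrative sketch (standard library only; a paraphrase of the contract documented
// above, not the shipped ConvertCPULimits): count, limit and weight are mutually
// exclusive, so the conversion only needs to count how many of the three were set and
// reject the spec when more than one is non-zero.

package main

import "fmt"

func validateCPUSettings(count, limit, weight int32) error {
	set := 0
	for _, v := range []int32{count, limit, weight} {
		if v > 0 {
			set++
		}
	}
	if set > 1 {
		return fmt.Errorf("cpu count (%d), limit (%d) and weight (%d) are mutually exclusive", count, limit, weight)
	}
	return nil
}

func main() {
	fmt.Println(validateCPUSettings(2, 0, 0))    // <nil>
	fmt.Println(validateCPUSettings(2, 5000, 0)) // error
}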
-func ConvertCPULimits(ctx context.Context, cid string, spec *specs.Spec, maxCPUCount int32) (int32, int32, int32, error) { - cpuNumSet := 0 - cpuCount := oci.ParseAnnotationsCPUCount(ctx, spec, annotations.ContainerProcessorCount, 0) - if cpuCount > 0 { - cpuNumSet++ - } - - cpuLimit := oci.ParseAnnotationsCPULimit(ctx, spec, annotations.ContainerProcessorLimit, 0) - if cpuLimit > 0 { - cpuNumSet++ - } - - cpuWeight := oci.ParseAnnotationsCPUWeight(ctx, spec, annotations.ContainerProcessorWeight, 0) - if cpuWeight > 0 { - cpuNumSet++ - } - - if cpuNumSet > 1 { - return 0, 0, 0, fmt.Errorf("invalid spec - Windows Container CPU Count: '%d', Limit: '%d', and Weight: '%d' are mutually exclusive", cpuCount, cpuLimit, cpuWeight) - } else if cpuNumSet == 1 { - cpuCount = NormalizeProcessorCount(ctx, cid, cpuCount, maxCPUCount) - } - return cpuCount, cpuLimit, cpuWeight, nil -} - -// createWindowsContainerDocument creates documents for passing to HCS or GCS to create -// a container, both hosted and process isolated. It creates both v1 and v2 -// container objects, WCOW only. The containers storage should have been mounted already. -func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInternal) (*schema1.ContainerConfig, *hcsschema.Container, error) { - log.G(ctx).Debug("hcsshim: CreateHCSContainerDocument") - // TODO: Make this safe if exported so no null pointer dereferences. - - if coi.Spec == nil { - return nil, nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing") - } - - if coi.Spec.Windows == nil { - return nil, nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ") - } - - v1 := &schema1.ContainerConfig{ - SystemType: "Container", - Name: coi.actualID, - Owner: coi.actualOwner, - HvPartition: false, - IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot, - } - - // IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM - // ID is a property on the create call in V2 rather than part of the schema. - v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}} - - // TODO: Still want to revisit this. - if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 { - return nil, nil, fmt.Errorf("invalid spec - not enough layer folders supplied") - } - - if coi.Spec.Hostname != "" { - v1.HostName = coi.Spec.Hostname - v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname} - } - - var ( - uvmCPUCount int32 - hostCPUCount = processorinfo.ProcessorCount() - maxCPUCount = hostCPUCount - ) - - if coi.HostingSystem != nil { - uvmCPUCount = coi.HostingSystem.ProcessorCount() - maxCPUCount = uvmCPUCount - } - - cpuCount, cpuLimit, cpuWeight, err := ConvertCPULimits(ctx, coi.ID, coi.Spec, maxCPUCount) - if err != nil { - return nil, nil, err - } - - if coi.HostingSystem != nil && coi.ScaleCPULimitsToSandbox && cpuLimit > 0 { - // When ScaleCPULimitsToSandbox is set and we are running in a UVM, we assume - // the CPU limit has been calculated based on the number of processors on the - // host, and instead re-calculate it based on the number of processors in the UVM. - // - // This is needed to work correctly with assumptions kubelet makes when computing - // the CPU limit value: - // - kubelet thinks about CPU limits in terms of millicores, which are 1000ths of - // cores. So if 2000 millicores are assigned, the container can use 2 processors. 
- // - In Windows, the job object CPU limit is global across all processors on the - // system, and is represented as a fraction out of 10000. In this model, a limit - // of 10000 means the container can use all processors fully, regardless of how - // many processors exist on the system. - // - To convert the millicores value into the job object limit, kubelet divides - // the millicores by the number of CPU cores on the host. This causes problems - // when running inside a UVM, as the UVM may have a different number of processors - // than the host system. - // - // To work around this, we undo the division by the number of host processors, and - // re-do the division based on the number of processors inside the UVM. This will - // give the correct value based on the actual number of millicores that the kubelet - // wants the container to have. - // - // Kubelet formula to compute CPU limit: - // cpuMaximum := 10000 * cpuLimit.MilliValue() / int64(runtime.NumCPU()) / 1000 - newCPULimit := cpuLimit * hostCPUCount / uvmCPUCount - // We only apply bounds here because we are calculating the CPU limit ourselves, - // and this matches the kubelet behavior where they also bound the CPU limit by [1, 10000]. - // In the case where we use the value directly from the user, we don't alter it to fit - // within the bounds, but just let the platform throw an error if it is invalid. - if newCPULimit < 1 { - newCPULimit = 1 - } else if newCPULimit > 10000 { - newCPULimit = 10000 - } - log.G(ctx).WithFields(logrus.Fields{ - "hostCPUCount": hostCPUCount, - "uvmCPUCount": uvmCPUCount, - "oldCPULimit": cpuLimit, - "newCPULimit": newCPULimit, - }).Info("rescaling CPU limit for UVM sandbox") - cpuLimit = newCPULimit - } - - v1.ProcessorCount = uint32(cpuCount) - v1.ProcessorMaximum = int64(cpuLimit) - v1.ProcessorWeight = uint64(cpuWeight) - - v2Container.Processor = &hcsschema.Processor{ - Count: cpuCount, - Maximum: cpuLimit, - Weight: cpuWeight, - } - - // Memory Resources - memoryMaxInMB := oci.ParseAnnotationsMemory(ctx, coi.Spec, annotations.ContainerMemorySizeInMB, 0) - if memoryMaxInMB > 0 { - v1.MemoryMaximumInMB = int64(memoryMaxInMB) - v2Container.Memory = &hcsschema.Memory{ - SizeInMB: memoryMaxInMB, - } - } - - // Storage Resources - storageBandwidthMax := oci.ParseAnnotationsStorageBps(ctx, coi.Spec, annotations.ContainerStorageQoSBandwidthMaximum, 0) - storageIopsMax := oci.ParseAnnotationsStorageIops(ctx, coi.Spec, annotations.ContainerStorageQoSIopsMaximum, 0) - if storageBandwidthMax > 0 || storageIopsMax > 0 { - v1.StorageBandwidthMaximum = uint64(storageBandwidthMax) - v1.StorageIOPSMaximum = uint64(storageIopsMax) - v2Container.Storage.QoS = &hcsschema.StorageQoS{ - BandwidthMaximum: storageBandwidthMax, - IopsMaximum: storageIopsMax, - } - } - - // TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically - if coi.Spec.Windows.Network != nil { - v2Container.Networking = &hcsschema.Networking{} - - v1.EndpointList = coi.Spec.Windows.Network.EndpointList - - // Use the reserved network namespace for containers created inside - // cloned or template UVMs. 
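// Illustrative worked example (standard library only; the numbers are made up): the
// rescaling above undoes kubelet's division by the host CPU count and redoes it with the
// UVM's count. With a 2000m limit, 16 host CPUs and 4 UVM CPUs, kubelet computes
// 10000*2000/16/1000 = 1250, and the shim rescales that to 1250*16/4 = 5000, i.e. half of
// the 4-CPU UVM (2000 millicores), before clamping to [1, 10000].

package main

import "fmt"

func rescaleCPULimit(cpuLimit, hostCPUCount, uvmCPUCount int32) int32 {
	newLimit := cpuLimit * hostCPUCount / uvmCPUCount
	if newLimit < 1 {
		newLimit = 1
	} else if newLimit > 10000 {
		newLimit = 10000
	}
	return newLimit
}

func main() {
	milliCores := int64(2000)
	hostCPUs, uvmCPUs := int32(16), int32(4)

	// Kubelet's formula, as quoted above:
	// cpuMaximum := 10000 * cpuLimit.MilliValue() / int64(runtime.NumCPU()) / 1000
	kubeletLimit := int32(10000 * milliCores / int64(hostCPUs) / 1000)
	fmt.Println("kubelet job-object limit:", kubeletLimit) // 1250

	fmt.Println("rescaled for UVM:", rescaleCPULimit(kubeletLimit, hostCPUs, uvmCPUs)) // 5000
}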
- if coi.HostingSystem != nil && (coi.HostingSystem.IsTemplate || coi.HostingSystem.IsClone) { - v2Container.Networking.Namespace = uvm.DefaultCloneNetworkNamespaceID - } else { - v2Container.Networking.Namespace = coi.actualNetworkNamespace - } - - v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery - v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery - - if coi.Spec.Windows.Network.DNSSearchList != nil { - v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",") - v2Container.Networking.DnsSearchList = v1.DNSSearchList - } - - v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName - v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName - } - - if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - v1.Credentials = cs - // If this is a HCS v2 schema container, we created the CCG instance - // with the other container resources. Pass the CCG state information - // as part of the container document. - if coi.ccgState != nil { - v2Container.ContainerCredentialGuard = coi.ccgState - } - } - - if coi.Spec.Root == nil { - return nil, nil, fmt.Errorf("spec is invalid - root isn't populated") - } - - if coi.Spec.Root.Readonly { - return nil, nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`) - } - - // Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1 - v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1] - - if coi.isV2Argon() || coi.isV1Argon() { - // Argon v1 or v2. - const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$` - if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil { - return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path) - } - if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' { - coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat - } - v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1. - v2Container.Storage.Path = coi.Spec.Root.Path - } else if coi.isV1Xenon() { - // V1 Xenon - v1.HvPartition = true - if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference - return nil, nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`) - } - if coi.Spec.Windows.HyperV.UtilityVMPath != "" { - // Client-supplied utility VM path - v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath} - } else { - // Client was lazy. Let's locate it from the layer folders instead. - uvmImagePath, err := uvmfolder.LocateUVMFolder(ctx, coi.Spec.Windows.LayerFolders) - if err != nil { - return nil, nil, err - } - v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)} - } - } else if coi.isV2Xenon() { - // Hosting system was supplied, so is v2 Xenon. 
- v2Container.Storage.Path = coi.Spec.Root.Path - if coi.HostingSystem.OS() == "windows" { - layers, err := layers.GetHCSLayers(ctx, coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]) - if err != nil { - return nil, nil, err - } - v2Container.Storage.Layers = layers - } - } - - if coi.isV2Argon() || coi.isV1Argon() { // Argon v1 or v2 - for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] { - layerID, err := wclayer.LayerID(ctx, layerPath) - if err != nil { - return nil, nil, err - } - v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath}) - v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath}) - } - } - - mounts, err := createMountsConfig(ctx, coi) - if err != nil { - return nil, nil, err - } - v1.MappedDirectories = mounts.mdsv1 - v2Container.MappedDirectories = mounts.mdsv2 - if len(mounts.mpsv1) > 0 && osversion.Build() < osversion.RS3 { - return nil, nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows") - } - v1.MappedPipes = mounts.mpsv1 - v2Container.MappedPipes = mounts.mpsv2 - - // add assigned devices to the container definition - if err := parseAssignedDevices(ctx, coi, v2Container); err != nil { - return nil, nil, err - } - - // add any device extensions - extensions, err := getDeviceExtensions(coi.Spec.Annotations) - if err != nil { - return nil, nil, err - } - v2Container.AdditionalDeviceNamespace = extensions - - // Process dump setup (if requested) - dumpPath := "" - if coi.HostingSystem != nil { - dumpPath = coi.HostingSystem.ProcessDumpLocation() - } - - if specDumpPath, ok := coi.Spec.Annotations[annotations.ContainerProcessDumpLocation]; ok { - // If a process dump path was specified at pod creation time for a hypervisor isolated pod, then - // use this value. If one was specified on the container creation document then override with this - // instead. Unlike Linux, Windows containers can set the dump path on a per container basis. - dumpPath = specDumpPath - } - - if dumpPath != "" { - dumpType, err := parseDumpType(coi.Spec.Annotations) - if err != nil { - return nil, nil, err - } - - // Setup WER registry keys for local process dump creation if specified. 
- // https://docs.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps - v2Container.RegistryChanges = &hcsschema.RegistryChanges{ - AddValues: []hcsschema.RegistryValue{ - { - Key: &hcsschema.RegistryKey{ - Hive: "Software", - Name: "Microsoft\\Windows\\Windows Error Reporting\\LocalDumps", - }, - Name: "DumpFolder", - StringValue: dumpPath, - Type_: "String", - }, - { - Key: &hcsschema.RegistryKey{ - Hive: "Software", - Name: "Microsoft\\Windows\\Windows Error Reporting\\LocalDumps", - }, - Name: "DumpType", - DWordValue: dumpType, - Type_: "DWord", - }, - }, - } - } - - return v1, v2Container, nil -} - -// parseAssignedDevices parses assigned devices for the container definition -// this is currently supported for v2 argon and xenon only -func parseAssignedDevices(ctx context.Context, coi *createOptionsInternal, v2 *hcsschema.Container) error { - if !coi.isV2Argon() && !coi.isV2Xenon() { - return nil - } - - v2AssignedDevices := []hcsschema.Device{} - for _, d := range coi.Spec.Windows.Devices { - v2Dev := hcsschema.Device{} - switch d.IDType { - case uvm.VPCILocationPathIDType: - v2Dev.LocationPath = d.ID - v2Dev.Type = hcsschema.DeviceInstanceID - case uvm.VPCIClassGUIDTypeLegacy: - v2Dev.InterfaceClassGuid = d.ID - case uvm.VPCIClassGUIDType: - v2Dev.InterfaceClassGuid = d.ID - default: - return fmt.Errorf("specified device %s has unsupported type %s", d.ID, d.IDType) - } - log.G(ctx).WithField("hcsv2 device", v2Dev).Debug("adding assigned device to container doc") - v2AssignedDevices = append(v2AssignedDevices, v2Dev) - } - v2.AssignedDevices = v2AssignedDevices - return nil -} - -// parseDumpType parses the passed in string representation of the local user mode process dump type to the -// corresponding value the registry expects to be set. -// -// See DumpType at https://docs.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps for the mappings -func parseDumpType(annots map[string]string) (int32, error) { - dmpTypeStr := annots[annotations.WCOWProcessDumpType] - switch dmpTypeStr { - case "": - // If no type specified, default to full dumps. 
- return 2, nil - case "mini": - return 1, nil - case "full": - return 2, nil - default: - return -1, errors.New(`unknown dump type specified, valid values are "mini" or "full"`) - } -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go deleted file mode 100644 index 27bf2669d2..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build windows - -package hcsoci - -import ( - "context" - - "github.com/Microsoft/hcsshim/hcn" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/sirupsen/logrus" -) - -func createNetworkNamespace(ctx context.Context, coi *createOptionsInternal, r *resources.Resources) error { - op := "hcsoci::createNetworkNamespace" - l := log.G(ctx).WithField(logfields.ContainerID, coi.ID) - l.Debug(op + " - Begin") - defer func() { - l.Debug(op + " - End") - }() - - ns, err := hcn.NewNamespace("").Create() - if err != nil { - return err - } - - log.G(ctx).WithFields(logrus.Fields{ - "netID": ns.Id, - logfields.ContainerID: coi.ID, - }).Info("created network namespace for container") - - r.SetNetNS(ns.Id) - r.SetCreatedNetNS(true) - - endpoints := make([]string, 0) - for _, endpointID := range coi.Spec.Windows.Network.EndpointList { - err = hcn.AddNamespaceEndpoint(ns.Id, endpointID) - if err != nil { - return err - } - log.G(ctx).WithFields(logrus.Fields{ - "netID": ns.Id, - "endpointID": endpointID, - }).Info("added network endpoint to namespace") - endpoints = append(endpoints, endpointID) - } - r.Add(&uvm.NetworkEndpoints{EndpointIDs: endpoints, Namespace: ns.Id}) - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go deleted file mode 100644 index 65e7acf022..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go +++ /dev/null @@ -1,178 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -// Contains functions relating to a LCOW container, as opposed to a utility VM - -import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - - "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/Microsoft/hcsshim/internal/layers" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" -) - -func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error { - if coi.Spec.Root == nil { - coi.Spec.Root = &specs.Root{} - } - containerRootInUVM := r.ContainerRootInUVM() - if coi.Spec.Windows != nil && len(coi.Spec.Windows.LayerFolders) > 0 { - log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage") - rootPath, scratchPath, err := layers.MountLCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) - if err != nil { - return errors.Wrap(err, "failed to mount container storage") - } - coi.Spec.Root.Path = rootPath - layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox) - r.SetLayers(layers) - 
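The createNetworkNamespace helper removed above (internal/hcsoci/network.go) reduces to two HCN calls: create an empty namespace, then add each endpoint from the spec to it. A stripped-down sketch of that flow (Windows-only, requires the HNS service; the endpoint ID is a placeholder):

    package main

    import (
        "fmt"

        "github.com/Microsoft/hcsshim/hcn"
    )

    // createNamespaceWithEndpoints creates a fresh HNS namespace and attaches the
    // given pre-existing endpoints to it. The real helper also records both in the
    // resources object so they can be cleaned up later.
    func createNamespaceWithEndpoints(endpointIDs []string) (string, error) {
        ns, err := hcn.NewNamespace("").Create()
        if err != nil {
            return "", err
        }
        for _, id := range endpointIDs {
            if err := hcn.AddNamespaceEndpoint(ns.Id, id); err != nil {
                return "", err
            }
        }
        return ns.Id, nil
    }

    func main() {
        id, err := createNamespaceWithEndpoints([]string{"<endpoint-guid>"})
        fmt.Println(id, err)
    }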
r.SetLcowScratchPath(scratchPath) - } else if coi.Spec.Root.Path != "" { - // This is the "Plan 9" root filesystem. - // TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows. - hostPath := coi.Spec.Root.Path - uvmPathForContainersFileSystem := path.Join(r.ContainerRootInUVM(), guestpath.RootfsPath) - share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly, false, nil) - if err != nil { - return errors.Wrap(err, "adding plan9 root") - } - coi.Spec.Root.Path = uvmPathForContainersFileSystem - r.Add(share) - } else { - return errors.New("must provide either Windows.LayerFolders or Root.Path") - } - - for i, mount := range coi.Spec.Mounts { - switch mount.Type { - case "bind": - case "physical-disk": - case "virtual-disk": - default: - // Unknown mount type - continue - } - if mount.Destination == "" || mount.Source == "" { - return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount) - } - - if coi.HostingSystem != nil { - hostPath := mount.Source - uvmPathForShare := path.Join(containerRootInUVM, fmt.Sprintf(guestpath.LCOWMountPathPrefixFmt, i)) - uvmPathForFile := uvmPathForShare - - readOnly := false - for _, o := range mount.Options { - if strings.ToLower(o) == "ro" { - readOnly = true - break - } - } - - l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) - if mount.Type == "physical-disk" { - l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount") - uvmPathForShare = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly, mount.Options) - if err != nil { - return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) - } - - uvmPathForFile = scsiMount.UVMPath - r.Add(scsiMount) - coi.Spec.Mounts[i].Type = "none" - } else if mount.Type == "virtual-disk" { - l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount") - uvmPathForShare = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) - - // if the scsi device is already attached then we take the uvm path that the function below returns - // that is where it was previously mounted in UVM - scsiMount, err := coi.HostingSystem.AddSCSI( - ctx, - hostPath, - uvmPathForShare, - readOnly, - false, - mount.Options, - uvm.VMAccessTypeIndividual, - ) - if err != nil { - return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) - } - - uvmPathForFile = scsiMount.UVMPath - r.Add(scsiMount) - coi.Spec.Mounts[i].Type = "none" - } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { - // Mounts that map to a path in UVM are specified with 'sandbox://' prefix. 
- // example: sandbox:///a/dirInUvm destination:/b/dirInContainer - uvmPathForFile = mount.Source - } else if strings.HasPrefix(mount.Source, guestpath.HugePagesMountPrefix) { - // currently we only support 2M hugepage size - hugePageSubDirs := strings.Split(strings.TrimPrefix(mount.Source, guestpath.HugePagesMountPrefix), "/") - if len(hugePageSubDirs) < 2 { - return errors.Errorf( - `%s mount path is invalid, expected format: %s/`, - mount.Source, - guestpath.HugePagesMountPrefix, - ) - } - - // hugepages:// should be followed by pagesize - if hugePageSubDirs[0] != "2M" { - return errors.Errorf(`only 2M (megabytes) pagesize is supported, got %s`, hugePageSubDirs[0]) - } - // Hugepages inside a container are backed by a mount created inside a UVM. - uvmPathForFile = mount.Source - } else { - st, err := os.Stat(hostPath) - if err != nil { - return errors.Wrap(err, "could not open bind mount target") - } - restrictAccess := false - var allowedNames []string - if !st.IsDir() { - // Map the containing directory in, but restrict the share to a single - // file. - var fileName string - hostPath, fileName = filepath.Split(hostPath) - allowedNames = append(allowedNames, fileName) - restrictAccess = true - uvmPathForFile = path.Join(uvmPathForShare, fileName) - } - l.Debug("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount") - - share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForShare, readOnly, restrictAccess, allowedNames) - if err != nil { - return errors.Wrapf(err, "adding plan9 mount %+v", mount) - } - r.Add(share) - } - coi.Spec.Mounts[i].Source = uvmPathForFile - } - } - - if coi.HostingSystem == nil { - return nil - } - - if coi.hasWindowsAssignedDevices() { - windowsDevices, closers, err := handleAssignedDevicesLCOW(ctx, coi.HostingSystem, coi.Spec.Annotations, coi.Spec.Windows.Devices) - if err != nil { - return err - } - r.Add(closers...) - coi.Spec.Windows.Devices = windowsDevices - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go deleted file mode 100644 index 9e08821831..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go +++ /dev/null @@ -1,235 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -// Contains functions relating to a WCOW container, as opposed to a utility VM - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/credentials" - "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/Microsoft/hcsshim/internal/layers" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/schemaversion" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -const wcowSandboxMountPath = "C:\\SandboxMounts" - -func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error { - if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil { - return errors.New("field 'Spec.Windows.Layerfolders' is not populated") - } - - scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1] - - // TODO: Remove this code for auto-creation. 
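The hugepages:// source handling above only accepts a 2M page size and requires at least one subdirectory after it. A standalone sketch of the same parsing; the hugepages:// literal stands in for guestpath.HugePagesMountPrefix, whose exact value lives in the internal guestpath package:

    package main

    import (
        "fmt"
        "strings"
    )

    const hugePagesPrefix = "hugepages://" // stand-in for guestpath.HugePagesMountPrefix

    // parseHugePagesSource validates a hugepages mount source of the form
    // hugepages://<pagesize>/<subdir> and rejects anything other than 2M pages.
    func parseHugePagesSource(source string) (pageSize, subDir string, err error) {
        parts := strings.Split(strings.TrimPrefix(source, hugePagesPrefix), "/")
        if len(parts) < 2 {
            return "", "", fmt.Errorf("%s mount path is invalid, expected format: %s<pagesize>/<subdir>", source, hugePagesPrefix)
        }
        if parts[0] != "2M" {
            return "", "", fmt.Errorf("only 2M (megabytes) pagesize is supported, got %s", parts[0])
        }
        return parts[0], strings.Join(parts[1:], "/"), nil
    }

    func main() {
        fmt.Println(parseHugePagesSource("hugepages://2M/hugepage-dir")) // 2M hugepage-dir <nil>
        fmt.Println(parseHugePagesSource("hugepages://1G/some-dir"))     // page-size error
    }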
Make the caller responsible. - // Create the directory for the RW scratch layer if it doesn't exist - if _, err := os.Stat(scratchFolder); os.IsNotExist(err) { - if err := os.MkdirAll(scratchFolder, 0777); err != nil { - return errors.Wrapf(err, "failed to auto-create container scratch folder %s", scratchFolder) - } - } - - // Create sandbox.vhdx if it doesn't exist in the scratch folder. It's called sandbox.vhdx - // rather than scratch.vhdx as in the v1 schema, it's hard-coded in HCS. - if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) { - if err := wclayer.CreateScratchLayer(ctx, scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil { - return errors.Wrap(err, "failed to CreateSandboxLayer") - } - } - - if coi.Spec.Root == nil { - coi.Spec.Root = &specs.Root{} - } - - if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) { - log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage") - containerRootInUVM := r.ContainerRootInUVM() - containerRootPath, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) - if err != nil { - return errors.Wrap(err, "failed to mount container storage") - } - coi.Spec.Root.Path = containerRootPath - layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox) - r.SetLayers(layers) - } - - if err := setupMounts(ctx, coi, r); err != nil { - return err - } - - if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - // Only need to create a CCG instance for v2 containers - if schemaversion.IsV21(coi.actualSchemaVersion) { - hypervisorIsolated := coi.HostingSystem != nil - ccgInstance, ccgResource, err := credentials.CreateCredentialGuard(ctx, coi.actualID, cs, hypervisorIsolated) - if err != nil { - return err - } - coi.ccgState = ccgInstance.CredentialGuard - r.Add(ccgResource) - if hypervisorIsolated { - // If hypervisor isolated we need to add an hvsocket service table entry - // By default HVSocket won't allow something inside the VM to connect - // back to a process on the host. We need to update the HVSocket service table - // to allow a connection to CCG.exe on the host, so that GMSA can function. - // We need to hot add this here because at UVM creation time we don't know what containers - // will be launched in the UVM, nonetheless if they will ask for GMSA. This is a workaround - // for the previous design requirement for CCG V2 where the service entry - // must be present in the UVM'S HCS document before being sent over as hot adding - // an HvSocket service was not possible. - hvSockConfig := ccgInstance.HvSocketConfig - if err := coi.HostingSystem.UpdateHvSocketService(ctx, hvSockConfig.ServiceId, hvSockConfig.ServiceConfig); err != nil { - return errors.Wrap(err, "failed to update hvsocket service") - } - } - } - } - - if coi.HostingSystem != nil { - if coi.hasWindowsAssignedDevices() { - windowsDevices, closers, err := handleAssignedDevicesWindows(ctx, coi.HostingSystem, coi.Spec.Annotations, coi.Spec.Windows.Devices) - if err != nil { - return err - } - r.Add(closers...) - coi.Spec.Windows.Devices = windowsDevices - } - // when driver installation completes, we are guaranteed that the device is ready for use, - // so reinstall drivers to make sure the devices are ready when we proceed. 
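The auto-creation block near the top of allocateWindowsResources (and its TODO about making the caller responsible) amounts to two checks: the scratch folder exists, and it contains a sandbox.vhdx. A sketch of doing the same work caller-side; it assumes compilation inside the hcsshim module, since wclayer is an internal, Windows-only package:

    package scratchprep

    import (
        "context"
        "os"
        "path/filepath"

        "github.com/Microsoft/hcsshim/internal/wclayer"
    )

    // ensureScratch creates the scratch folder and its sandbox.vhdx if missing.
    // layerFolders follows the convention above: read-only parent layers first,
    // the scratch folder last.
    func ensureScratch(ctx context.Context, layerFolders []string) error {
        scratch := layerFolders[len(layerFolders)-1]
        if _, err := os.Stat(scratch); os.IsNotExist(err) {
            if err := os.MkdirAll(scratch, 0777); err != nil {
                return err
            }
        }
        if _, err := os.Stat(filepath.Join(scratch, "sandbox.vhdx")); os.IsNotExist(err) {
            return wclayer.CreateScratchLayer(ctx, scratch, layerFolders[:len(layerFolders)-1])
        }
        return nil
    }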
- // TODO katiewasnothere: we should find a way to avoid reinstalling drivers - driverClosers, err := installPodDrivers(ctx, coi.HostingSystem, coi.Spec.Annotations) - if err != nil { - return err - } - r.Add(driverClosers...) - } - - return nil -} - -// setupMounts adds the custom mounts requested in the container configuration of this -// request. -func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.Resources) error { - // Validate each of the mounts. If this is a V2 Xenon, we have to add them as - // VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for - // us to do as it's done by HCS. - for _, mount := range coi.Spec.Mounts { - if mount.Destination == "" || mount.Source == "" { - return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount) - } - switch mount.Type { - case "": - case "physical-disk": - case "virtual-disk": - case "extensible-virtual-disk": - default: - return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type) - } - - if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) { - uvmPath := fmt.Sprintf(guestpath.WCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) - readOnly := false - for _, o := range mount.Options { - if strings.ToLower(o) == "ro" { - readOnly = true - break - } - } - l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) - if mount.Type == "physical-disk" { - l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly, mount.Options) - if err != nil { - return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) - } - r.Add(scsiMount) - } else if mount.Type == "virtual-disk" { - l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSI( - ctx, - mount.Source, - uvmPath, - readOnly, - false, - mount.Options, - uvm.VMAccessTypeIndividual, - ) - if err != nil { - return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) - } - r.Add(scsiMount) - } else if mount.Type == "extensible-virtual-disk" { - l.Debug("hcsshim::allocateWindowsResource Hot-adding ExtensibleVirtualDisk") - scsiMount, err := coi.HostingSystem.AddSCSIExtensibleVirtualDisk(ctx, mount.Source, uvmPath, readOnly) - if err != nil { - return errors.Wrapf(err, "adding SCSI EVD mount failed %+v", mount) - } - r.Add(scsiMount) - } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { - // Mounts that map to a path in the UVM are specified with a 'sandbox://' prefix. - // - // Example: sandbox:///a/dirInUvm destination:C:\\dirInContainer. - // - // so first convert to a path in the sandboxmounts path itself. - sandboxPath := convertToWCOWSandboxMountPath(mount.Source) - - // Now we need to exec a process in the vm that will make these directories as theres - // no functionality in the Windows gcs to create an arbitrary directory. - // - // Create the directory, but also run dir afterwards regardless of if mkdir succeeded to handle the case where the directory already exists - // e.g. from a previous container specifying the same mount (and thus creating the same directory). 
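Sandbox mount sources are re-rooted under C:\SandboxMounts before the mkdir step described above. A tiny sketch of that conversion; the sandbox:// literal stands in for guestpath.SandboxMountPrefix, inferred from the comments:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    const (
        sandboxMountPrefix   = "sandbox://"       // stand-in for guestpath.SandboxMountPrefix
        wcowSandboxMountPath = `C:\SandboxMounts` // matches the constant defined above
    )

    // convertSandboxSource strips the sandbox:// prefix and re-roots the path
    // under the per-UVM sandbox mounts directory, as the helper at the bottom of
    // this file does.
    func convertSandboxSource(source string) string {
        subPath := strings.TrimPrefix(source, sandboxMountPrefix)
        return filepath.Join(wcowSandboxMountPath, subPath)
    }

    func main() {
        // On Windows this prints C:\SandboxMounts\a\dirInUvm.
        fmt.Println(convertSandboxSource("sandbox:///a/dirInUvm"))
    }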
- b := &bytes.Buffer{} - stderr, err := cmd.CreatePipeAndListen(b, false) - if err != nil { - return err - } - req := &cmd.CmdProcessRequest{ - Args: []string{"cmd", "/c", "mkdir", sandboxPath, "&", "dir", sandboxPath}, - Stderr: stderr, - } - exitCode, err := cmd.ExecInUvm(ctx, coi.HostingSystem, req) - if err != nil { - return errors.Wrapf(err, "failed to create sandbox mount directory in utility VM with exit code %d %q", exitCode, b.String()) - } - } else { - if uvm.IsPipe(mount.Source) { - pipe, err := coi.HostingSystem.AddPipe(ctx, mount.Source) - if err != nil { - return errors.Wrap(err, "failed to add named pipe to UVM") - } - r.Add(pipe) - } else { - l.Debug("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount") - options := coi.HostingSystem.DefaultVSMBOptions(readOnly) - share, err := coi.HostingSystem.AddVSMB(ctx, mount.Source, options) - if err != nil { - return errors.Wrapf(err, "failed to add VSMB share to utility VM for mount %+v", mount) - } - r.Add(share) - } - } - } - } - - return nil -} - -func convertToWCOWSandboxMountPath(source string) string { - subPath := strings.TrimPrefix(source, guestpath.SandboxMountPrefix) - return filepath.Join(wcowSandboxMountPath, subPath) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go deleted file mode 100644 index f6d35df0e5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go +++ /dev/null @@ -1 +0,0 @@ -package hns diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go deleted file mode 100644 index 83b683bd90..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ /dev/null @@ -1,338 +0,0 @@ -//go:build windows - -package hns - -import ( - "encoding/json" - "net" - "strings" - - "github.com/sirupsen/logrus" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - VirtualNetwork string `json:",omitempty"` - VirtualNetworkName string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress net.IP `json:",omitempty"` - IPv6Address net.IP `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSDomain string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - GatewayAddressV6 string `json:",omitempty"` - EnableInternalDNS bool `json:",omitempty"` - DisableICC bool `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - IPv6PrefixLength uint8 `json:",omitempty"` - IsRemoteEndpoint bool `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - Namespace *Namespace `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` - SharedContainers []string `json:",omitempty"` -} - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest struct { - ContainerID string `json:"ContainerId,omitempty"` - SystemType SystemType 
`json:"SystemType"` - CompartmentID uint16 `json:"CompartmentId,omitempty"` - VirtualNICName string `json:"VirtualNicName,omitempty"` -} - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse struct { - Success bool - Error string -} - -// EndpointStats is the object that has stats for a given endpoint -type EndpointStats struct { - BytesReceived uint64 `json:"BytesReceived"` - BytesSent uint64 `json:"BytesSent"` - DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` - DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` - EndpointID string `json:"EndpointId"` - InstanceID string `json:"InstanceId"` - PacketsReceived uint64 `json:"PacketsReceived"` - PacketsSent uint64 `json:"PacketsSent"` -} - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - endpoint := &HNSEndpoint{} - err := hnsCall(method, "/endpoints/"+path, request, &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - var endpoint []HNSEndpoint - err := hnsCall("GET", "/endpoints/", "", &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID -func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { - var stats EndpointStats - err := hnsCall("GET", "/endpointstats/"+id, "", &stats) - if err != nil { - return nil, err - } - - return &stats, nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return HNSEndpointRequest("GET", endpointID, "") -} - -// GetHNSEndpointStats get the stats for a n Endpoint by ID -func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { - return hnsEndpointStatsRequest(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - hnsResponse, err := HNSListEndpointRequest() - if err != nil { - return nil, err - } - for _, hnsEndpoint := range hnsResponse { - if hnsEndpoint.Name == endpointName { - return &hnsEndpoint, nil - } - } - return nil, EndpointNotFoundError{EndpointName: endpointName} -} - -type endpointAttachInfo struct { - SharedContainers json.RawMessage `json:",omitempty"` -} - -func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { - attachInfo := endpointAttachInfo{} - err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) - - // Return false allows us to just return the err - if err != nil { - return false, err - } - - if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { - return true, nil - } - - return false, nil -} - -// Create Endpoint by sending EndpointRequest to HNS. 
TODO: Create a separate HNS interface to place all these methods -func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { - operation := "Create" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - return HNSEndpointRequest("POST", "", string(jsonString)) -} - -// Delete Endpoint by sending EndpointRequest to HNS -func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { - operation := "Delete" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - return HNSEndpointRequest("DELETE", endpoint.Id, "") -} - -// Update Endpoint -func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { - operation := "Update" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) - - return endpoint, err -} - -// ApplyACLPolicy applies a set of ACL Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { - operation := "ApplyACLPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { - operation := "ApplyProxyPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ContainerAttach attaches an endpoint to container -func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { - operation := "ContainerAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - CompartmentID: compartmentID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// ContainerDetach detaches an endpoint from container -func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { - operation := "ContainerDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// HostAttach attaches a nic on the host -func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { - operation := "HostAttach" - title 
:= "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - CompartmentID: compartmentID, - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// HostDetach detaches a nic on the host -func (endpoint *HNSEndpoint) HostDetach() error { - operation := "HostDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// VirtualMachineNICAttach attaches a endpoint to a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { - operation := "VirtualMachineNicAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - VirtualNICName: virtualMachineNICName, - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// VirtualMachineNICDetach detaches a endpoint from a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { - operation := "VirtualMachineNicDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go deleted file mode 100644 index 0a8f36d832..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build windows - -package hns - -import ( - "encoding/json" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return nil, hcserror.New(err, "hnsCall ", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return nil, err - } - return hnsresponse, nil -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - hnsresponse, err := hnsCallRawResponse(method, path, request) - if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) - } - if !hnsresponse.Success { - return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) - } 
- - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go deleted file mode 100644 index 464bb8954f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build windows - -package hns - -type HNSGlobals struct { - Version HNSVersion `json:"Version"` -} - -type HNSVersion struct { - Major int `json:"Major"` - Minor int `json:"Minor"` -} - -var ( - HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} -) - -func GetHNSGlobals() (*HNSGlobals, error) { - var version HNSVersion - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &HNSGlobals{ - Version: version, - } - - return globals, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go deleted file mode 100644 index 8861faee7a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ /dev/null @@ -1,144 +0,0 @@ -//go:build windows - -package hns - -import ( - "encoding/json" - "errors" - "net" - - "github.com/sirupsen/logrus" -) - -// Subnet is associated with a network and represents a list -// of subnets available to the network -type Subnet struct { - AddressPrefix string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` -} - -// MacPool is associated with a network and represents a list -// of macaddresses available to the network -type MacPool struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// HNSNetwork represents a network in HNS -type HNSNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type string `json:",omitempty"` - NetworkAdapterName string `json:",omitempty"` - SourceMac string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacPools []MacPool `json:",omitempty"` - Subnets []Subnet `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSServerCompartment uint32 `json:",omitempty"` - ManagementIP string `json:",omitempty"` - AutomaticDNS bool `json:",omitempty"` -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - var network HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return &network, nil -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - var network []HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return network, nil -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return HNSNetworkRequest("GET", networkID, "") -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - hsnnetworks, 
err := HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - for _, hnsnetwork := range hsnnetworks { - if hnsnetwork.Name == networkName { - return &hnsnetwork, nil - } - } - return nil, NetworkNotFoundError{NetworkName: networkName} -} - -// Create Network by sending NetworkRequest to HNS. -func (network *HNSNetwork) Create() (*HNSNetwork, error) { - operation := "Create" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - for _, subnet := range network.Subnets { - if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { - return nil, errors.New("network create error, subnet has address prefix but no gateway specified") - } - } - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - return HNSNetworkRequest("POST", "", string(jsonString)) -} - -// Delete Network by sending NetworkRequest to HNS -func (network *HNSNetwork) Delete() (*HNSNetwork, error) { - operation := "Delete" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - return HNSNetworkRequest("DELETE", network.Id, "") -} - -// Creates an endpoint on the Network. -func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { - return &HNSEndpoint{ - VirtualNetwork: network.Id, - IPAddress: ipAddress, - MacAddress: string(macAddress), - } -} - -func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) - - endpoint.VirtualNetwork = network.Id - return endpoint.Create() -} - -func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateRemoteEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - endpoint.IsRemoteEndpoint = true - return network.CreateEndpoint(endpoint) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go deleted file mode 100644 index 082c018a4e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ /dev/null @@ -1,110 +0,0 @@ -package hns - -// Type of Request Support in ModifySystem -type PolicyType string - -// RequestType const -const ( - Nat PolicyType = "NAT" - ACL PolicyType = "ACL" - PA PolicyType = "PA" - VLAN PolicyType = "VLAN" - VSID PolicyType = "VSID" - VNet PolicyType = "VNET" - L2Driver PolicyType = "L2Driver" - Isolation PolicyType = "Isolation" - QOS PolicyType = "QOS" - OutboundNat PolicyType = "OutBoundNAT" - ExternalLoadBalancer PolicyType = "ELB" - Route PolicyType = "ROUTE" - Proxy PolicyType = "PROXY" -) - -type NatPolicy struct { - Type PolicyType `json:"Type"` - Protocol string `json:",omitempty"` - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - ExternalPortReserved bool `json:",omitempty"` -} - -type QosPolicy struct { - Type PolicyType `json:"Type"` - MaximumOutgoingBandwidthInBytes uint64 -} - -type IsolationPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint - VSID uint - InDefaultIsolation bool -} - -type VlanPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint -} - -type VsidPolicy struct { - Type PolicyType `json:"Type"` - VSID uint -} - -type PaPolicy struct { - Type PolicyType `json:"Type"` - PA string `json:"PA"` -} - -type 
OutboundNatPolicy struct { - Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` - Destinations []string `json:",omitempty"` -} - -type ProxyPolicy struct { - Type PolicyType `json:"Type"` - IP string `json:",omitempty"` - Port string `json:",omitempty"` - ExceptionList []string `json:",omitempty"` - Destination string `json:",omitempty"` - OutboundNat bool `json:",omitempty"` -} - -type ActionType string -type DirectionType string -type RuleType string - -const ( - Allow ActionType = "Allow" - Block ActionType = "Block" - - In DirectionType = "In" - Out DirectionType = "Out" - - Host RuleType = "Host" - Switch RuleType = "Switch" -) - -type ACLPolicy struct { - Type PolicyType `json:"Type"` - Id string `json:"Id,omitempty"` - Protocol uint16 `json:",omitempty"` - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 `json:",omitempty"` - Action ActionType - Direction DirectionType - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 `json:",omitempty"` - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 `json:",omitempty"` - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 `json:",omitempty"` - ServiceName string `json:",omitempty"` -} - -type Policy struct { - Type PolicyType `json:"Type"` -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go deleted file mode 100644 index b98db40e8d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build windows - -package hns - -import ( - "encoding/json" - - "github.com/sirupsen/logrus" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy struct { - Policy - DestinationPrefix string `json:"DestinationPrefix,omitempty"` - NextHop string `json:"NextHop,omitempty"` - EncapEnabled bool `json:"NeedEncap,omitempty"` -} - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy struct { - LBPolicy - SourceVIP string `json:"SourceVIP,omitempty"` - VIPs []string `json:"VIPs,omitempty"` - ILB bool `json:"ILB,omitempty"` - DSR bool `json:"IsDSR,omitempty"` -} - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy struct { - Policy - Protocol uint16 `json:"Protocol,omitempty"` - InternalPort uint16 - ExternalPort uint16 -} - -// PolicyList is a structure defining schema for Policy list request -type PolicyList struct { - ID string `json:"ID,omitempty"` - EndpointReferences []string `json:"References,omitempty"` - Policies []json.RawMessage `json:"Policies,omitempty"` -} - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - var policy PolicyList - err := hnsCall(method, "/policylists/"+path, request, &policy) - if err != nil { - return nil, err - } - - return &policy, nil -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - var plist []PolicyList - err := hnsCall("GET", "/policylists/", "", &plist) - if err != nil { - return nil, err - } - - return plist, nil -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) 
(*PolicyList, error) { - policylist := &PolicyList{} - err := hnsCall(method, "/policylists/"+path, request, &policylist) - if err != nil { - return nil, err - } - - return policylist, nil -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return PolicyListRequest("GET", policyListID, "") -} - -// Create PolicyList by sending PolicyListRequest to HNS. -func (policylist *PolicyList) Create() (*PolicyList, error) { - operation := "Create" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - jsonString, err := json.Marshal(policylist) - if err != nil { - return nil, err - } - return PolicyListRequest("POST", "", string(jsonString)) -} - -// Delete deletes PolicyList -func (policylist *PolicyList) Delete() (*PolicyList, error) { - operation := "Delete" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - - return PolicyListRequest("DELETE", policylist.ID, "") -} - -// AddEndpoint add an endpoint to a Policy List -func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "AddEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - - return policylist.Create() -} - -// RemoveEndpoint removes an endpoint from the Policy List -func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "RemoveEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - elementToRemove := "/endpoints/" + endpoint.Id - - var references []string - - for _, endpointReference := range policylist.EndpointReferences { - if endpointReference == elementToRemove { - continue - } - references = append(references, endpointReference) - } - policylist.EndpointReferences = references - return policylist.Create() -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - operation := "AddLoadBalancer" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) - - policylist := &PolicyList{} - - elbPolicy := &ELBPolicy{ - SourceVIP: sourceVIP, - ILB: isILB, - } - - if len(vip) > 0 { - elbPolicy.VIPs = []string{vip} - } - elbPolicy.Type = ExternalLoadBalancer - elbPolicy.Protocol = protocol - elbPolicy.InternalPort = internalPort - elbPolicy.ExternalPort = externalPort - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(elbPolicy) - if err != nil { - return nil, err - } - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, destinationPrefix 
string, nextHop string, encapEnabled bool) (*PolicyList, error) { - operation := "AddRoute" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix) - - policylist := &PolicyList{} - - rPolicy := &RoutePolicy{ - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - EncapEnabled: encapEnabled, - } - rPolicy.Type = Route - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(rPolicy) - if err != nil { - return nil, err - } - - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go deleted file mode 100644 index b9c30b9019..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build windows - -package hns - -import ( - "github.com/sirupsen/logrus" -) - -type HNSSupportedFeatures struct { - Acl HNSAclFeatures `json:"ACL"` -} - -type HNSAclFeatures struct { - AclAddressLists bool `json:"AclAddressLists"` - AclNoHostRulePriority bool `json:"AclHostRulePriority"` - AclPortRanges bool `json:"AclPortRanges"` - AclRuleId bool `json:"AclRuleId"` -} - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - var hnsFeatures HNSSupportedFeatures - - globals, err := GetHNSGlobals() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.Debugf("Unable to obtain HNS globals: %s", err) - return hnsFeatures - } - - hnsFeatures.Acl = HNSAclFeatures{ - AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), - } - - return hnsFeatures -} - -func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { - if currentVersion.Major < minVersionSupported.Major { - return false - } - if currentVersion.Major > minVersionSupported.Major { - return true - } - if currentVersion.Minor < minVersionSupported.Minor { - return false - } - return true -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go deleted file mode 100644 index 749588ad39..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ /dev/null @@ -1,113 +0,0 @@ -//go:build windows - -package hns - -import ( - "encoding/json" - "fmt" - "os" - "path" - "strings" -) - -type namespaceRequest struct { - IsDefault bool `json:",omitempty"` -} - -type namespaceEndpointRequest struct { - ID string `json:"Id"` -} - -type NamespaceResource struct { - Type string - Data json.RawMessage -} - -type namespaceResourceRequest struct { - Type string - Data interface{} -} - -type Namespace struct { - ID string - IsDefault bool `json:",omitempty"` - ResourceList []NamespaceResource `json:",omitempty"` - CompartmentId uint32 `json:",omitempty"` -} - -func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { - var err error - hnspath := "/namespaces/" - if id != nil { - hnspath = path.Join(hnspath, *id) - } - if subpath != "" { - hnspath = path.Join(hnspath, 
subpath) - } - var reqJSON []byte - if request != nil { - if reqJSON, err = json.Marshal(request); err != nil { - return nil, err - } - } - var ns Namespace - err = hnsCall(method, hnspath, string(reqJSON), &ns) - if err != nil { - if strings.Contains(err.Error(), "Element not found.") { - return nil, os.ErrNotExist - } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) - } - return &ns, err -} - -func CreateNamespace() (string, error) { - req := namespaceRequest{} - ns, err := issueNamespaceRequest(nil, "POST", "", &req) - if err != nil { - return "", err - } - return ns.ID, nil -} - -func RemoveNamespace(id string) error { - _, err := issueNamespaceRequest(&id, "DELETE", "", nil) - return err -} - -func GetNamespaceEndpoints(id string) ([]string, error) { - ns, err := issueNamespaceRequest(&id, "GET", "", nil) - if err != nil { - return nil, err - } - var endpoints []string - for _, rsrc := range ns.ResourceList { - if rsrc.Type == "Endpoint" { - var endpoint namespaceEndpointRequest - err = json.Unmarshal(rsrc.Data, &endpoint) - if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) - } - endpoints = append(endpoints, endpoint.ID) - } - } - return endpoints, nil -} - -func AddNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) - return err -} - -func RemoveNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hooks/spec.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hooks/spec.go deleted file mode 100644 index 51ba3aa592..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hooks/spec.go +++ /dev/null @@ -1,53 +0,0 @@ -package hooks - -import ( - "fmt" - - oci "github.com/opencontainers/runtime-spec/specs-go" -) - -// Note: The below type definition as well as constants have been copied from -// https://github.com/opencontainers/runc/blob/master/libcontainer/configs/config.go. -// This is done to not introduce a direct dependency on runc, which would complicate -// integration with windows. -type HookName string - -const ( - - // Prestart commands are executed after the container namespaces are created, - // but before the user supplied command is executed from init. - // Note: This hook is now deprecated - // Prestart commands are called in the Runtime namespace. - Prestart HookName = "prestart" - - // CreateRuntime commands MUST be called as part of the create operation after - // the runtime environment has been created but before the pivot_root has been executed. - // CreateRuntime is called immediately after the deprecated Prestart hook. - // CreateRuntime commands are called in the Runtime Namespace. 
- CreateRuntime HookName = "createRuntime" -) - -// NewOCIHook creates a new oci.Hook with given parameters -func NewOCIHook(path string, args, env []string) oci.Hook { - return oci.Hook{ - Path: path, - Args: args, - Env: env, - } -} - -// AddOCIHook adds oci.Hook of the given hook name to spec -func AddOCIHook(spec *oci.Spec, hn HookName, hk oci.Hook) error { - if spec.Hooks == nil { - spec.Hooks = &oci.Hooks{} - } - switch hn { - case Prestart: - spec.Hooks.Prestart = append(spec.Hooks.Prestart, hk) - case CreateRuntime: - spec.Hooks.CreateRuntime = append(spec.Hooks.CreateRuntime, hk) - default: - return fmt.Errorf("hook %q is not supported", hn) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go deleted file mode 100644 index cb554867fe..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go +++ /dev/null @@ -1 +0,0 @@ -package interop diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/test/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go deleted file mode 100644 index 137dc3990a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build windows - -package interop - -import ( - "syscall" - "unsafe" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go - -//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree - -func ConvertAndFreeCoTaskMemString(buffer *uint16) string { - str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) - coTaskMemFree(unsafe.Pointer(buffer)) - return str -} - -func Win32FromHresult(hr uintptr) syscall.Errno { - if hr&0x1fff0000 == 0x00070000 { - return syscall.Errno(hr & 0xffff) - } - return syscall.Errno(hr) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go deleted file mode 100644 index 34b53d6e48..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// This package provides higher level constructs for the win32 job object API. -// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" -// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information -// structs and associated limit flags. Whatever is not present from the job object API -// in golang.org/x/sys/windows is located in /internal/winapi. 
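NewOCIHook and AddOCIHook above are essentially the whole surface of the hooks package. A short usage sketch; the hook binary and arguments are made up, and the import assumes compilation inside the hcsshim module since the package is internal:

    package main

    import (
        "fmt"

        oci "github.com/opencontainers/runtime-spec/specs-go"

        "github.com/Microsoft/hcsshim/internal/hooks"
    )

    func main() {
        spec := &oci.Spec{}
        // Build a createRuntime hook and append it to the spec.
        hk := hooks.NewOCIHook(`C:\hooks\setup.exe`, []string{"setup.exe", "--prepare"}, nil)
        if err := hooks.AddOCIHook(spec, hooks.CreateRuntime, hk); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(len(spec.Hooks.CreateRuntime)) // 1
    }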
-// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects -package jobobject diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go deleted file mode 100644 index d31a6a1e66..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ /dev/null @@ -1,113 +0,0 @@ -//go:build windows - -package jobobject - -import ( - "context" - "fmt" - "sync" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/queue" - "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ioInitOnce sync.Once - initIOErr error - // Global iocp handle that will be re-used for every job object - ioCompletionPort windows.Handle - // Mapping of job handle to queue to place notifications in. - jobMap sync.Map -) - -// MsgAllProcessesExited is a type representing a message that every process in a job has exited. -type MsgAllProcessesExited struct{} - -// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. -// This should not be treated as an error. -type MsgUnimplemented struct{} - -// pollIOCP polls the io completion port forever. -func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { - var ( - overlapped uintptr - code uint32 - key uintptr - ) - - for { - err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) - if err != nil { - log.G(ctx).WithError(err).Error("failed to poll for job object message") - continue - } - if val, ok := jobMap.Load(key); ok { - msq, ok := val.(*queue.MessageQueue) - if !ok { - log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") - continue - } - notification, err := parseMessage(code, overlapped) - if err != nil { - log.G(ctx).WithFields(logrus.Fields{ - "code": code, - "overlapped": overlapped, - }).Warn("failed to parse job object message") - continue - } - if err := msq.Write(notification); err == queue.ErrQueueClosed { - // Write will only return an error when the queue is closed. - // The only time a queue would ever be closed is when we call `Close` on - // the job it belongs to which also removes it from the jobMap, so something - // went wrong here. We can't return as this is reading messages for all jobs - // so just log it and move on. - log.G(ctx).WithFields(logrus.Fields{ - "code": code, - "overlapped": overlapped, - }).Warn("tried to write to a closed queue") - continue - } - } else { - log.G(ctx).Warn("received a message for a job not present in the mapping") - } - } -} - -func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { - // Check code and parse out relevant information related to that notification - // that we care about. For now all we handle is the message that all processes - // in the job have exited. - switch code { - case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: - return MsgAllProcessesExited{}, nil - // Other messages for completeness and a check to make sure that if we fall - // into the default case that this is a code we don't know how to handle. 
- case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: - case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: - case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: - case winapi.JOB_OBJECT_MSG_NEW_PROCESS: - case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: - case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: - case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: - case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: - case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: - default: - return nil, fmt.Errorf("unknown job notification type: %d", code) - } - return MsgUnimplemented{}, nil -} - -// Assigns an IO completion port to get notified of events for the registered job -// object. -func attachIOCP(job windows.Handle, iocp windows.Handle) error { - info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ - CompletionKey: job, - CompletionPort: iocp, - } - _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go deleted file mode 100644 index 8c0c979402..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ /dev/null @@ -1,317 +0,0 @@ -//go:build windows - -package jobobject - -import ( - "errors" - "fmt" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/winapi" - "golang.org/x/sys/windows" -) - -const ( - memoryLimitMax uint64 = 0xffffffffffffffff -) - -func isFlagSet(flag, controlFlags uint32) bool { - return (flag & controlFlags) == flag -} - -// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). -func (job *JobObject) SetResourceLimits(limits *JobLimits) error { - // Go through and check what limits were specified and apply them to the job. - if limits.MemoryLimitInBytes != 0 { - if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { - return fmt.Errorf("failed to set job object memory limit: %w", err) - } - } - - if limits.CPULimit != 0 { - if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { - return fmt.Errorf("failed to set job object cpu limit: %w", err) - } - } else if limits.CPUWeight != 0 { - if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { - return fmt.Errorf("failed to set job object cpu limit: %w", err) - } - } - - if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { - if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { - return fmt.Errorf("failed to set io limit on job object: %w", err) - } - } - return nil -} - -// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate -// all processes in the job on the last open handle being closed. -func (job *JobObject) SetTerminateOnLastHandleClose() error { - info, err := job.getExtendedInformation() - if err != nil { - return err - } - info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE - return job.setExtendedInformation(info) -} - -// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. 
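For reference, `SetResourceLimits` applies only the `JobLimits` fields that are non-zero, forwarding each to the matching Set* helper. A minimal usage sketch; the `applyLimits` name and the concrete limit values are illustrative, and an already-created `*jobobject.JobObject` is assumed:

    import "github.com/Microsoft/hcsshim/internal/jobobject"

    // applyLimits caps job-wide memory, applies a rate-based CPU hard cap and
    // bounds IOPS on an existing job object. Zero-valued JobLimits fields are
    // skipped, so unrelated limits stay untouched.
    func applyLimits(job *jobobject.JobObject) error {
        limits := &jobobject.JobLimits{
            MemoryLimitInBytes: 512 * 1024 * 1024, // hard cap of 512 MiB for all processes in the job
            CPULimit:           5000,              // rate-based hard cap, validated against cpuLimitMin/cpuLimitMax
            MaxIOPS:            1000,              // bound IO operations per second
        }
        return job.SetResourceLimits(limits)
    }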
-func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { - if memoryLimitInBytes >= memoryLimitMax { - return errors.New("memory limit specified exceeds the max size") - } - - info, err := job.getExtendedInformation() - if err != nil { - return err - } - - info.JobMemoryLimit = uintptr(memoryLimitInBytes) - info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY - return job.setExtendedInformation(info) -} - -// GetMemoryLimit gets the memory limit in bytes of the job object. -func (job *JobObject) GetMemoryLimit() (uint64, error) { - info, err := job.getExtendedInformation() - if err != nil { - return 0, err - } - return uint64(info.JobMemoryLimit), nil -} - -// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to -// `rateControlValue` for the job object. -func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { - cpuInfo, err := job.getCPURateControlInformation() - if err != nil { - return err - } - switch rateControlType { - case WeightBased: - if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { - return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) - } - cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED - cpuInfo.Value = rateControlValue - case RateBased: - if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { - return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) - } - cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP - cpuInfo.Value = rateControlValue - default: - return errors.New("invalid job object cpu rate control type") - } - return job.setCPURateControlInfo(cpuInfo) -} - -// GetCPULimit gets the cpu limits for the job object. -// `rateControlType` is used to indicate what type of cpu limit to query for. -func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { - info, err := job.getCPURateControlInformation() - if err != nil { - return 0, err - } - - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { - return 0, errors.New("the job does not have cpu rate control enabled") - } - - switch rateControlType { - case WeightBased: - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { - return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") - } - case RateBased: - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { - return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") - } - default: - return 0, errors.New("invalid job object cpu rate control type") - } - return info.Value, nil -} - -// SetCPUAffinity sets the processor affinity for the job object. -// The affinity is passed in as a bitmask. -func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { - info, err := job.getExtendedInformation() - if err != nil { - return err - } - info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) - info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) - return job.setExtendedInformation(info) -} - -// GetCPUAffinity gets the processor affinity for the job object. -// The returned affinity is a bitmask. 
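For reference, the affinity value passed to `SetCPUAffinity` is a plain bitmask in which bit i selects logical processor i. A small sketch of building such a mask; the `pinToFirstCPUs` helper is hypothetical, and only the first 64 logical processors fit in a single uint64:

    import (
        "fmt"

        "github.com/Microsoft/hcsshim/internal/jobobject"
    )

    // pinToFirstCPUs restricts every process in the job to logical processors
    // 0 through n-1 by setting the corresponding low bits of the affinity mask.
    func pinToFirstCPUs(job *jobobject.JobObject, n uint) error {
        if n == 0 || n > 64 {
            return fmt.Errorf("processor count %d does not fit in a 64-bit affinity mask", n)
        }
        var mask uint64
        for i := uint(0); i < n; i++ {
            mask |= 1 << i // bit i selects logical processor i
        }
        return job.SetCPUAffinity(mask)
    }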
-func (job *JobObject) GetCPUAffinity() (uint64, error) { - info, err := job.getExtendedInformation() - if err != nil { - return 0, err - } - return uint64(info.BasicLimitInformation.Affinity), nil -} - -// SetIOLimit sets the IO limits specified on the job object. -func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { - ioInfo, err := job.getIOLimit() - if err != nil { - return err - } - ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE - if maxBandwidth != 0 { - ioInfo.MaxBandwidth = maxBandwidth - } - if maxIOPS != 0 { - ioInfo.MaxIops = maxIOPS - } - return job.setIORateControlInfo(ioInfo) -} - -// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. -func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { - info, err := job.getIOLimit() - if err != nil { - return 0, err - } - return info.MaxBandwidth, nil -} - -// GetIOMaxIopsLimit gets the max iops for the job object. -func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { - info, err := job.getIOLimit() - if err != nil { - return 0, err - } - return info.MaxIops, nil -} - -// Helper function for getting a job object's extended information. -func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - windows.JobObjectExtendedLimitInformation, - uintptr(unsafe.Pointer(&info)), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", info, err) - } - return &info, nil -} - -// Helper function for getting a job object's CPU rate control information. -func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - windows.JobObjectCpuRateControlInformation, - uintptr(unsafe.Pointer(&info)), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", info, err) - } - return &info, nil -} - -// Helper function for setting a job object's extended information. -func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if _, err := windows.SetInformationJobObject( - job.handle, - windows.JobObjectExtendedLimitInformation, - uintptr(unsafe.Pointer(info)), - uint32(unsafe.Sizeof(*info)), - ); err != nil { - return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) - } - return nil -} - -// Helper function for querying job handle for IO limit information. 
-func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} - var blockCount uint32 = 1 - - if _, err := winapi.QueryIoRateControlInformationJobObject( - job.handle, - nil, - &ioInfo, - &blockCount, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) - } - - if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { - return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) - } - return ioInfo, nil -} - -// Helper function for setting a job object's IO rate control information. -func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { - return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) - } - return nil -} - -// Helper function for setting a job object's CPU rate control information. -func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - if _, err := windows.SetInformationJobObject( - job.handle, - windows.JobObjectCpuRateControlInformation, - uintptr(unsafe.Pointer(cpuInfo)), - uint32(unsafe.Sizeof(cpuInfo)), - ); err != nil { - return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/doc.go deleted file mode 100644 index 747ac49a97..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package layers deals with container layer mounting/unmounting for LCOW and WCOW -package layers diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/common.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/common.go deleted file mode 100644 index fa78a8ecb6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/common.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build windows - -package lcow - -import ( - "bytes" - "context" - "fmt" - "time" - - cmdpkg "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/sirupsen/logrus" -) - -// formatDiskUvm creates a utility vm, mounts the disk as a scsi disk onto to the VM -// and then formats it with ext4. 
-func formatDiskUvm(ctx context.Context, lcowUVM *uvm.UtilityVM, controller int, lun int32, destPath string) error { - // Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory - devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", controller, lun) - testdCtx, cancel := context.WithTimeout(ctx, timeout.TestDRetryLoop) - defer cancel() - for { - cmd := cmdpkg.CommandContext(testdCtx, lcowUVM, "test", "-d", devicePath) - err := cmd.Run() - if err == nil { - break - } - if _, ok := err.(*cmdpkg.ExitError); !ok { - return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", cmd.Spec.Args, destPath, err) - } - time.Sleep(time.Millisecond * 10) - } - cancel() - - // Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda` - lsCtx, cancel := context.WithTimeout(ctx, timeout.ExternalCommandToStart) - cmd := cmdpkg.CommandContext(lsCtx, lcowUVM, "ls", devicePath) - lsOutput, err := cmd.Output() - cancel() - if err != nil { - return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", cmd.Spec.Args, destPath, err) - } - device := fmt.Sprintf(`/dev/%s`, bytes.TrimSpace(lsOutput)) - log.G(ctx).WithFields(logrus.Fields{ - "dest": destPath, - "device": device, - }).Debug("lcow::FormatDisk device guest location") - - // Format it ext4 - mkfsCtx, cancel := context.WithTimeout(ctx, timeout.ExternalCommandToStart) - cmd = cmdpkg.CommandContext(mkfsCtx, lcowUVM, "mkfs.ext4", "-q", "-E", "lazy_itable_init=0,nodiscard", "-O", `^has_journal,sparse_super2,^resize_inode`, device) - var mkfsStderr bytes.Buffer - cmd.Stderr = &mkfsStderr - err = cmd.Run() - cancel() - if err != nil { - return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s. detailed error: %s", cmd.Spec.Args, destPath, err, mkfsStderr.String()) - } - - log.G(ctx).WithField("dest", destPath).Debug("lcow::FormatDisk complete") - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go deleted file mode 100644 index 937b8deafa..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build windows - -package lcow - -import ( - "context" - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/sirupsen/logrus" -) - -// FormatDisk creates a utility vm, mounts the disk as a scsi disk onto to the VM -// and then formats it with ext4. Disk is expected to be made offline before this -// command is run. The following powershell commands: -// 'Get-Disk -Number | Set-Disk -IsOffline $true' -// can be used to offline the disk. 
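For reference, `FormatDisk` needs only a running Linux utility VM and the host path of a disk that has already been taken offline. A minimal usage sketch; the `formatRawDisk` helper is hypothetical and the `\\.\PhysicalDrive2` path form is an assumption about how the physical disk would be addressed:

    import (
        "context"

        "github.com/Microsoft/hcsshim/internal/lcow"
        "github.com/Microsoft/hcsshim/internal/uvm"
    )

    // formatRawDisk hot-adds an offline physical disk to an existing LCOW
    // utility VM and formats it as ext4, e.g. diskPath = `\\.\PhysicalDrive2`.
    func formatRawDisk(ctx context.Context, vm *uvm.UtilityVM, diskPath string) error {
        return lcow.FormatDisk(ctx, vm, diskPath)
    }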
-func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) error { - if lcowUVM == nil { - return fmt.Errorf("no uvm") - } - - if lcowUVM.OS() != "linux" { - return errors.New("lcow::FormatDisk requires a linux utility VM to operate") - } - - log.G(ctx).WithFields(logrus.Fields{ - "dest": destPath, - }).Debug("lcow::FormatDisk opts") - - var options []string - scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false, options) // No destination as not formatted - if err != nil { - return err - } - - defer func() { - _ = scsi.Release(ctx) - }() - - log.G(ctx).WithFields(logrus.Fields{ - "dest": destPath, - "controller": scsi.Controller, - "lun": scsi.LUN, - }).Debug("lcow::FormatDisk device attached") - - if err := formatDiskUvm(ctx, lcowUVM, scsi.Controller, scsi.LUN, destPath); err != nil { - return err - } - log.G(ctx).WithField("dest", destPath).Debug("lcow::FormatDisk complete") - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/doc.go deleted file mode 100644 index 6105d5b57d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/doc.go +++ /dev/null @@ -1 +0,0 @@ -package lcow diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go deleted file mode 100644 index c86d141adf..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go +++ /dev/null @@ -1,154 +0,0 @@ -//go:build windows - -package lcow - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "time" - - "github.com/Microsoft/go-winio/vhd" - cmdpkg "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/copyfile" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/sirupsen/logrus" -) - -const ( - // DefaultScratchSizeGB is the size of the default LCOW scratch disk in GB - DefaultScratchSizeGB = 20 - - // defaultVhdxBlockSizeMB is the block-size for the scratch VHDx's this - // package can create. - defaultVhdxBlockSizeMB = 1 -) - -// CreateScratch uses a utility VM to create an empty scratch disk of a -// requested size. It has a caching capability. If the cacheFile exists, and the -// request is for a default size, a copy of that is made to the target. If the -// size is non-default, or the cache file does not exist, it uses a utility VM -// to create target. It is the responsibility of the caller to synchronize -// simultaneous attempts to create the cache file. 
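For reference, the caching behaviour described in the comment above turns repeat default-size requests into a plain file copy once the cache has been seeded. A minimal usage sketch; the helper, directory layout and file names are illustrative only:

    import (
        "context"
        "path/filepath"

        "github.com/Microsoft/hcsshim/internal/lcow"
        "github.com/Microsoft/hcsshim/internal/uvm"
    )

    // newSandboxScratch creates a default-sized scratch VHDX for a sandbox.
    // If scratch-cache.vhdx already exists it is simply copied to dest;
    // otherwise the disk is created and formatted inside the utility VM and
    // then seeds the cache. Callers must serialize access to the cache file.
    func newSandboxScratch(ctx context.Context, vm *uvm.UtilityVM, dir string) error {
        dest := filepath.Join(dir, "sandbox.vhdx")
        cache := filepath.Join(dir, "scratch-cache.vhdx")
        return lcow.CreateScratch(ctx, vm, dest, lcow.DefaultScratchSizeGB, cache)
    }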
-func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, sizeGB uint32, cacheFile string) error { - if lcowUVM == nil { - return fmt.Errorf("no uvm") - } - - if lcowUVM.OS() != "linux" { - return errors.New("lcow::CreateScratch requires a linux utility VM to operate") - } - - log.G(ctx).WithFields(logrus.Fields{ - "dest": destFile, - "sizeGB": sizeGB, - "cache": cacheFile, - }).Debug("lcow::CreateScratch opts") - - // Retrieve from cache if the default size and already on disk - if cacheFile != "" && sizeGB == DefaultScratchSizeGB { - if _, err := os.Stat(cacheFile); err == nil { - if err := copyfile.CopyFile(ctx, cacheFile, destFile, false); err != nil { - return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err) - } - log.G(ctx).WithFields(logrus.Fields{ - "dest": destFile, - "cache": cacheFile, - }).Debug("lcow::CreateScratch copied from cache") - return nil - } - } - - // Create the VHDX - if err := vhd.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil { - return fmt.Errorf("failed to create VHDx %s: %s", destFile, err) - } - - var options []string - scsi, err := lcowUVM.AddSCSI( - ctx, - destFile, - "", // No destination as not formatted - false, - lcowUVM.ScratchEncryptionEnabled(), - options, - uvm.VMAccessTypeIndividual, - ) - if err != nil { - return err - } - removeSCSI := true - defer func() { - if removeSCSI { - _ = lcowUVM.RemoveSCSI(ctx, destFile) - } - }() - - log.G(ctx).WithFields(logrus.Fields{ - "dest": destFile, - "controller": scsi.Controller, - "lun": scsi.LUN, - }).Debug("lcow::CreateScratch device attached") - - // Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory - devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", scsi.Controller, scsi.LUN) - testdCtx, cancel := context.WithTimeout(ctx, timeout.TestDRetryLoop) - defer cancel() - for { - cmd := cmdpkg.CommandContext(testdCtx, lcowUVM, "test", "-d", devicePath) - err := cmd.Run() - if err == nil { - break - } - if _, ok := err.(*cmdpkg.ExitError); !ok { - return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", cmd.Spec.Args, destFile, err) - } - time.Sleep(time.Millisecond * 10) - } - cancel() - - // Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda` - lsCtx, cancel := context.WithTimeout(ctx, timeout.ExternalCommandToStart) - cmd := cmdpkg.CommandContext(lsCtx, lcowUVM, "ls", devicePath) - lsOutput, err := cmd.Output() - cancel() - if err != nil { - return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", cmd.Spec.Args, destFile, err) - } - device := fmt.Sprintf(`/dev/%s`, bytes.TrimSpace(lsOutput)) - log.G(ctx).WithFields(logrus.Fields{ - "dest": destFile, - "device": device, - }).Debug("lcow::CreateScratch device guest location") - - // Format it ext4 - mkfsCtx, cancel := context.WithTimeout(ctx, timeout.ExternalCommandToStart) - cmd = cmdpkg.CommandContext(mkfsCtx, lcowUVM, "mkfs.ext4", "-q", "-E", "lazy_itable_init=0,nodiscard", "-O", `^has_journal,sparse_super2,^resize_inode`, device) - var mkfsStderr bytes.Buffer - cmd.Stderr = &mkfsStderr - err = cmd.Run() - cancel() - if err != nil { - return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", cmd.Spec.Args, destFile, err) - } - - // Hot-Remove before we copy it - removeSCSI = false - if err := lcowUVM.RemoveSCSI(ctx, destFile); err != nil { - return fmt.Errorf("failed to hot-remove: %s", err) - } - - // Populate the cache. 
- if cacheFile != "" && (sizeGB == DefaultScratchSizeGB) { - if err := copyfile.CopyFile(ctx, destFile, cacheFile, true); err != nil { - return fmt.Errorf("failed to seed cache '%s' from '%s': %s", destFile, cacheFile, err) - } - } - - log.G(ctx).WithField("dest", destFile).Debug("lcow::CreateScratch created (non-cache)") - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/test/vendor/github.com/Microsoft/hcsshim/internal/log/context.go deleted file mode 100644 index ae4faf4234..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/log/context.go +++ /dev/null @@ -1,116 +0,0 @@ -package log - -import ( - "context" - - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -type entryContextKeyType int - -const _entryContextKey entryContextKeyType = iota - -var ( - // L is the default, blank logging entry. WithField and co. all return a copy - // of the original entry, so this will not leak fields between calls. - // - // Do NOT modify fields directly, as that will corrupt state for all users and - // is not thread safe. - // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`. - L = logrus.NewEntry(logrus.StandardLogger()) - - // G is an alias for GetEntry - G = GetEntry - - // S is an alias for SetEntry - S = SetEntry - - // U is an alias for UpdateContext - U = UpdateContext -) - -// GetEntry returns a `logrus.Entry` stored in the context, if one exists. -// Otherwise, it returns a default entry that points to the current context. -// -// Note: if the a new entry is returned, it will reference the passed in context. -// However, existing contexts may be stored in parent contexts and additionally reference -// earlier contexts. -// Use `UpdateContext` to update the entry and context. -func GetEntry(ctx context.Context) *logrus.Entry { - entry := fromContext(ctx) - - if entry == nil { - entry = L.WithContext(ctx) - } - - return entry -} - -// SetEntry updates the log entry in the context with the provided fields, and -// returns both. It is equivlent to: -// entry := GetEntry(ctx).WithFields(fields) -// ctx = WithContext(ctx, entry) -// -// See WithContext for more information. -func SetEntry(ctx context.Context, fields logrus.Fields) (context.Context, *logrus.Entry) { - e := GetEntry(ctx) - if len(fields) > 0 { - e = e.WithFields(fields) - } - return WithContext(ctx, e) -} - -// UpdateContext extracts the log entry from the context, and, if the entry's -// context points to a parent's of the current context, ands the entry -// to the most recent context. It is equivlent to: -// entry :=GetEntry(ctx) -// ctx = WithContext(ctx, entry) -// -// This allows the entry to reference the most recent context and any new -// values (such as span contexts) added to it. -// -// See WithContext for more information. -func UpdateContext(ctx context.Context) context.Context { - // there is no way to check its ctx (and not one of its parents) that contains `e` - // so, at a slight cost, force add `e` to the context - ctx, _ = WithContext(ctx, GetEntry(ctx)) - return ctx -} - -// WithContext returns a context that contains the provided log entry. 
-// The entry can be extracted with `GetEntry` (`G`) -// -// The entry in the context is a copy of `entry` (generated by `entry.WithContext`) -func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) { - // regardless of the order, entry.Context != GetEntry(ctx) - // here, the returned entry will reference the supplied context - entry = entry.WithContext(ctx) - ctx = context.WithValue(ctx, _entryContextKey, entry) - - return ctx, entry -} - -// Copy extracts the tracing Span and logging entry from the src Context, if they -// exist, and adds them to the dst Context. -// -// This is useful to share tracing and logging between contexts, but not the -// cancellation. For example, if the src Context has been cancelled but cleanup -// operations triggered by the cancellation require a non-cancelled context to -// execute. -func Copy(dst context.Context, src context.Context) context.Context { - if s := trace.FromContext(src); s != nil { - dst = trace.NewContext(dst, s) - } - - if e := fromContext(src); e != nil { - dst, _ = WithContext(dst, e) - } - - return dst -} - -func fromContext(ctx context.Context) *logrus.Entry { - e, _ := ctx.Value(_entryContextKey).(*logrus.Entry) - return e -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/test/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go deleted file mode 100644 index 8f89405923..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go +++ /dev/null @@ -1,45 +0,0 @@ -package log - -import ( - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// Hook serves to intercept and format `logrus.Entry`s before they are passed -// to the ETW hook. -// -// The containerd shim discards the (formatted) logrus output, and outputs only via ETW. -// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and -// then re-output via the ETW hook. 
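For reference, wiring the hook up is a single call against the standard logrus logger. A minimal sketch; the `installSpanHook` helper is hypothetical:

    import (
        "github.com/Microsoft/hcsshim/internal/log"
        "github.com/sirupsen/logrus"
    )

    // installSpanHook registers the hook on the standard logger so that any
    // entry logged with a context carrying an OpenCensus span picks up traceID
    // and spanID fields before the entry is handed on to the ETW hook.
    func installSpanHook() {
        logrus.AddHook(log.NewHook())
    }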
-type Hook struct{} - -var _ logrus.Hook = &Hook{} - -func NewHook() *Hook { - return &Hook{} -} - -func (h *Hook) Levels() []logrus.Level { - return logrus.AllLevels -} - -func (h *Hook) Fire(e *logrus.Entry) (err error) { - h.addSpanContext(e) - - return nil -} - -func (h *Hook) addSpanContext(e *logrus.Entry) { - ctx := e.Context - if ctx == nil { - return - } - span := trace.FromContext(ctx) - if span == nil { - return - } - sctx := span.SpanContext() - e.Data[logfields.TraceID] = sctx.TraceID.String() - e.Data[logfields.SpanID] = sctx.SpanID.String() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/test/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go deleted file mode 100644 index d51e0fd89f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ /dev/null @@ -1,194 +0,0 @@ -package log - -import ( - "bytes" - "encoding/json" - "errors" - "strings" - "sync/atomic" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// This package scrubs objects of potentially sensitive information to pass to logging - -type genMap = map[string]interface{} -type scrubberFunc func(genMap) error - -const _scrubbedReplacement = "" - -var ( - ErrUnknownType = errors.New("encoded object is of unknown type") - - // case sensitive keywords, so "env" is not a substring on "Environment" - _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} - - _scrub int32 -) - -// SetScrubbing enables scrubbing -func SetScrubbing(enable bool) { - v := int32(0) // cant convert from bool to int32 directly - if enable { - v = 1 - } - atomic.StoreInt32(&_scrub, v) -} - -// IsScrubbingEnabled checks if scrubbing is enabled -func IsScrubbingEnabled() bool { - v := atomic.LoadInt32(&_scrub) - return v != 0 -} - -// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of -// type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) -func ScrubProcessParameters(s string) (string, error) { - // todo: deal with v1 ProcessConfig - b := []byte(s) - if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { - return s, nil - } - - pp := hcsschema.ProcessParameters{} - if err := json.Unmarshal(b, &pp); err != nil { - return "", err - } - pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - - buf := bytes.NewBuffer(b[:0]) - if err := encode(buf, pp); err != nil { - return "", err - } - return strings.TrimSpace(buf.String()), nil -} - -// ScrubBridgeCreate scrubs requests sent over the bridge of type -// internal/gcs/protocol.containerCreate wrapping an internal/hcsoci.linuxHostedSystem -func ScrubBridgeCreate(b []byte) ([]byte, error) { - return scrubBytes(b, scrubBridgeCreate) -} - -func scrubBridgeCreate(m genMap) error { - if !isRequestBase(m) { - return ErrUnknownType - } - if ss, ok := m["ContainerConfig"]; ok { - // ContainerConfig is a json encoded struct passed as a regular string field - s, ok := ss.(string) - if !ok { - return ErrUnknownType - } - b, err := scrubBytes([]byte(s), scrubLinuxHostedSystem) - if err != nil { - return err - } - m["ContainerConfig"] = string(b) - return nil - } - return ErrUnknownType -} - -func scrubLinuxHostedSystem(m genMap) error { - if m, ok := index(m, "OciSpecification"); ok { - if _, ok := m["annotations"]; ok { - m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - } - if m, ok := index(m, "process"); ok { - if _, ok := m["env"]; ok { - m["env"] = []string{_scrubbedReplacement} 
- return nil - } - } - } - return ErrUnknownType -} - -// ScrubBridgeExecProcess scrubs requests sent over the bridge of type -// internal/gcs/protocol.containerExecuteProcess -func ScrubBridgeExecProcess(b []byte) ([]byte, error) { - return scrubBytes(b, scrubExecuteProcess) -} - -func scrubExecuteProcess(m genMap) error { - if !isRequestBase(m) { - return ErrUnknownType - } - if m, ok := index(m, "Settings"); ok { - if ss, ok := m["ProcessParameters"]; ok { - // ProcessParameters is a json encoded struct passed as a regular sting field - s, ok := ss.(string) - if !ok { - return ErrUnknownType - } - - s, err := ScrubProcessParameters(s) - if err != nil { - return err - } - - m["ProcessParameters"] = s - return nil - } - } - return ErrUnknownType -} - -func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { - if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { - return b, nil - } - - m := make(genMap) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - - // could use regexp, but if the env strings contain braces, the regexp fails - // parsing into individual structs would require access to private structs - if err := scrub(m); err != nil { - return nil, err - } - - buf := &bytes.Buffer{} - if err := encode(buf, m); err != nil { - return nil, err - } - - return bytes.TrimSpace(buf.Bytes()), nil -} - -func encode(buf *bytes.Buffer, v interface{}) error { - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - return nil -} - -func isRequestBase(m genMap) bool { - // neither of these are (currently) `omitempty` - _, a := m["ActivityId"] - _, c := m["ContainerId"] - return a && c -} - -// combination `m, ok := m[s]` and `m, ok := m.(genMap)` -func index(m genMap, s string) (genMap, bool) { - if m, ok := m[s]; ok { - mm, ok := m.(genMap) - return mm, ok - } - - return m, false -} - -func hasKeywords(b []byte) bool { - for _, bb := range _scrubKeywords { - if bytes.Contains(b, bb) { - return true - } - } - return false -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/test/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go deleted file mode 100644 index 3e175e5222..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ /dev/null @@ -1,61 +0,0 @@ -package logfields - -const ( - // Identifiers - - Name = "name" - Namespace = "namespace" - Operation = "operation" - - ID = "id" - SandboxID = "sid" - ContainerID = "cid" - ExecID = "eid" - ProcessID = "pid" - TaskID = "tid" - UVMID = "uvm-id" - - // networking and IO - - File = "file" - Path = "path" - Bytes = "bytes" - Pipe = "pipe" - - // Common Misc - - Attempt = "attemptNo" - JSON = "json" - - // Time - - StartTime = "startTime" - EndTime = "endTime" - Duration = "duration" - Timeout = "timeout" - - // Keys/values - - Field = "field" - Key = "key" - OCIAnnotation = "oci-annotation" - Value = "value" - Options = "options" - - // Golang type's - - ExpectedType = "expected-type" - Bool = "bool" - Uint32 = "uint32" - Uint64 = "uint64" - - // runhcs - - VMShimOperation = "vmshim-op" - - // logging and tracing - - TraceID = "traceID" - SpanID = "spanID" - ParentSpanID = "parentSpanID" -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go deleted file mode 100644 index 1ef5814d7e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go +++ /dev/null @@ 
-1,316 +0,0 @@ -package memory - -import ( - "github.com/pkg/errors" -) - -const ( - minimumClassSize = MiB - maximumClassSize = 4 * GiB - memoryClassNumber = 7 -) - -var ( - ErrInvalidMemoryClass = errors.New("invalid memory class") - ErrEarlyMerge = errors.New("not all children have been freed") - ErrEmptyPoolOperation = errors.New("operation on empty pool") -) - -// GetMemoryClassType returns the minimum memory class type that can hold a device of -// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset -// intervals in between, for a total of 7 different classes. This function does not -// do a validity check -func GetMemoryClassType(s uint64) classType { - s = (s - 1) >> 20 - memCls := uint32(0) - for s > 0 { - s = s >> 2 - memCls++ - } - return classType(memCls) -} - -// GetMemoryClassSize returns size in bytes for a given memory class -func GetMemoryClassSize(memCls classType) (uint64, error) { - if memCls >= memoryClassNumber { - return 0, ErrInvalidMemoryClass - } - return minimumClassSize << (2 * memCls), nil -} - -// region represents a contiguous memory block -type region struct { - // parent region that has been split into 4 - parent *region - class classType - // offset represents offset in bytes - offset uint64 -} - -// memoryPool tracks free and busy (used) memory regions -type memoryPool struct { - free map[uint64]*region - busy map[uint64]*region -} - -// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c -// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory -// space. -// -// There are a total of 7 different region sizes that can be allocated, with the smallest -// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device). -// -// For efficiency and to reduce fragmentation an entire region is allocated when requested. -// When there's no available region of requested size, we try to allocate more memory for -// this particular size by splitting the next available larger region into smaller ones, e.g. -// if there's no region available for size class 0, we try splitting a region from class 1, -// then class 2 etc, until we are able to do so or hit the upper limit. -type PoolAllocator struct { - pools [memoryClassNumber]*memoryPool -} - -var _ MappedRegion = ®ion{} -var _ Allocator = &PoolAllocator{} - -func (r *region) Offset() uint64 { - return r.offset -} - -func (r *region) Size() uint64 { - sz, err := GetMemoryClassSize(r.class) - if err != nil { - panic(err) - } - return sz -} - -func (r *region) Type() classType { - return r.class -} - -func newEmptyMemoryPool() *memoryPool { - return &memoryPool{ - free: make(map[uint64]*region), - busy: make(map[uint64]*region), - } -} - -func NewPoolMemoryAllocator() PoolAllocator { - pa := PoolAllocator{} - p := newEmptyMemoryPool() - // by default we allocate a single region with maximum possible size (class type) - p.free[0] = ®ion{ - class: memoryClassNumber - 1, - offset: 0, - } - pa.pools[memoryClassNumber-1] = p - return pa -} - -// Allocate checks memory region pool for the given `size` and returns a free region with -// minimal offset, if none available tries expanding matched memory pool. 
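For reference, the class arithmetic above rounds each request up to a power-of-four multiple of 1MiB (1MiB, 4MiB, 16MiB, 64MiB, 256MiB, 1GiB, 4GiB). A small worked sketch with a hypothetical `classFor` helper: a 256MiB request maps to class 4, whose size is exactly 256MiB, while a 3GiB request rounds up to class 6, the 4GiB class:

    import (
        "fmt"

        "github.com/Microsoft/hcsshim/internal/memory"
    )

    // classFor prints the allocator class a request maps to and that class's
    // real size, e.g. 256*MiB -> class 4 (256 MiB), 3*GiB -> class 6 (4096 MiB).
    func classFor(request uint64) {
        cls := memory.GetMemoryClassType(request)
        size, err := memory.GetMemoryClassSize(cls)
        if err != nil {
            panic(err) // only possible for a request above the 4GiB maximum class
        }
        fmt.Printf("%d MiB -> class %d (%d MiB)\n", request/memory.MiB, cls, size/memory.MiB)
    }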
-// -// Internally it's done via moving a region from free pool into a busy pool -func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { - memCls := GetMemoryClassType(size) - if memCls >= memoryClassNumber { - return nil, ErrInvalidMemoryClass - } - - // find region with the smallest offset - nextCls, nextOffset, err := pa.findNextOffset(memCls) - if err != nil { - return nil, err - } - - // this means that there are no more regions for the current class, try expanding - if nextCls != memCls { - if err := pa.split(memCls); err != nil { - if err == ErrInvalidMemoryClass { - return nil, ErrNotEnoughSpace - } - return nil, err - } - } - - if err := pa.markBusy(memCls, nextOffset); err != nil { - return nil, err - } - - // by this point memory pool for memCls should have been created, - // either prior or during split call - if r := pa.pools[memCls].busy[nextOffset]; r != nil { - return r, nil - } - - return nil, ErrNotEnoughSpace -} - -// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into -// a bigger one -func (pa *PoolAllocator) Release(reg MappedRegion) error { - mp := pa.pools[reg.Type()] - if mp == nil { - return ErrEmptyPoolOperation - } - - err := pa.markFree(reg.Type(), reg.Offset()) - if err != nil { - return err - } - - n := mp.free[reg.Offset()] - if n == nil { - return ErrNotAllocated - } - if err := pa.merge(n.parent); err != nil { - if err != ErrEarlyMerge { - return err - } - } - return nil -} - -// findNextOffset finds next region location for a given memCls -func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) { - for mc := memCls; mc < memoryClassNumber; mc++ { - pi := pa.pools[mc] - if pi == nil || len(pi.free) == 0 { - continue - } - - target := uint64(maximumClassSize) - for offset := range pi.free { - if offset < target { - target = offset - } - } - return mc, target, nil - } - return 0, 0, ErrNotEnoughSpace -} - -// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit -func (pa *PoolAllocator) split(clsType classType) error { - nextClsType := clsType + 1 - if nextClsType >= memoryClassNumber { - return ErrInvalidMemoryClass - } - - nextPool := pa.pools[nextClsType] - if nextPool == nil { - nextPool = newEmptyMemoryPool() - pa.pools[nextClsType] = nextPool - } - - cls, offset, err := pa.findNextOffset(nextClsType) - if err != nil { - return err - } - // not enough memory in the next class, try to recursively expand - if cls != nextClsType { - if err := pa.split(nextClsType); err != nil { - return err - } - } - - if err := pa.markBusy(nextClsType, offset); err != nil { - return err - } - - // memCls validity has been checked already, we can ignore the error - clsSize, _ := GetMemoryClassSize(clsType) - - nextReg := nextPool.busy[offset] - if nextReg == nil { - return ErrNotAllocated - } - - // expand memCls - cp := pa.pools[clsType] - if cp == nil { - cp = newEmptyMemoryPool() - pa.pools[clsType] = cp - } - // create 4 smaller regions - for i := uint64(0); i < 4; i++ { - offset := nextReg.offset + i*clsSize - reg := ®ion{ - parent: nextReg, - class: clsType, - offset: offset, - } - cp.free[offset] = reg - } - return nil -} - -func (pa *PoolAllocator) merge(parent *region) error { - // nothing to merge - if parent == nil { - return nil - } - - childCls := parent.class - 1 - childPool := pa.pools[childCls] - // no child nodes to merge, try to merge parent - if childPool == nil { - return 
pa.merge(parent.parent) - } - - childSize, err := GetMemoryClassSize(childCls) - if err != nil { - return err - } - - // check if all the child nodes are free - var children []*region - for i := uint64(0); i < 4; i++ { - child, free := childPool.free[parent.offset+i*childSize] - if !free { - return ErrEarlyMerge - } - children = append(children, child) - } - - // at this point all the child nodes will be free and we can merge - for _, child := range children { - delete(childPool.free, child.offset) - } - - if err := pa.markFree(parent.class, parent.offset); err != nil { - return err - } - - return pa.merge(parent.parent) -} - -// markFree internally moves a region with `offset` from busy to free map -func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error { - clsPool := pa.pools[memCls] - if clsPool == nil { - return ErrEmptyPoolOperation - } - - if reg, exists := clsPool.busy[offset]; exists { - clsPool.free[offset] = reg - delete(clsPool.busy, offset) - return nil - } - return ErrNotAllocated -} - -// markBusy internally moves a region with `offset` from free to busy map -func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error { - clsPool := pa.pools[memCls] - if clsPool == nil { - return ErrEmptyPoolOperation - } - - if reg, exists := clsPool.free[offset]; exists { - clsPool.busy[offset] = reg - delete(clsPool.free, offset) - return nil - } - return ErrNotAllocated -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go deleted file mode 100644 index d6cdb8cc4c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package memory - -import "github.com/pkg/errors" - -type classType uint32 - -const ( - MiB = 1024 * 1024 - GiB = 1024 * MiB -) - -var ( - ErrNotEnoughSpace = errors.New("not enough space") - ErrNotAllocated = errors.New("no memory allocated at the given offset") -) - -// MappedRegion represents a memory block with an offset -type MappedRegion interface { - Offset() uint64 - Size() uint64 - Type() classType -} - -// Allocator is an interface for memory allocation -type Allocator interface { - Allocate(uint64) (MappedRegion, error) - Release(MappedRegion) error -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/endpoints.go b/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/endpoints.go deleted file mode 100644 index b8e35bd6bb..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/endpoints.go +++ /dev/null @@ -1,33 +0,0 @@ -package networking - -type Endpoint struct { - EndpointName string - NamespaceID string - Settings *EndpointSettings -} - -type EndpointSettings struct { - Name string - Macaddress string - IPAddress string - IPAddressPrefixLength uint32 - NetworkName string - DefaultGateway string - DeviceDetails *DeviceDetails -} - -type DeviceDetails struct { - PCIDeviceDetails *PCIDeviceDetails -} - -type PCIDeviceDetails struct { - DeviceID string - VirtualFunctionIndex uint32 -} - -func NewEndpoint(settings *EndpointSettings) (*Endpoint, error) { - return &Endpoint{ - EndpointName: settings.Name, - Settings: settings, - }, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/networks.go b/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/networks.go deleted file mode 100644 index 7297b5a247..0000000000 --- 
a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxy/networking/networks.go +++ /dev/null @@ -1,17 +0,0 @@ -package networking - -type Network struct { - NetworkName string - Settings *NetworkSettings -} - -type NetworkSettings struct { - Name string -} - -func NewNetwork(settings *NetworkSettings) (*Network, error) { - return &Network{ - NetworkName: settings.Name, - Settings: settings, - }, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.pb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.pb.go deleted file mode 100644 index 39ac60ed87..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.pb.go +++ /dev/null @@ -1,1311 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.proto - -package ncproxyttrpc - -import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RequestTypeInternal int32 - -const ( - RequestTypeInternal_Setup RequestTypeInternal = 0 - RequestTypeInternal_Teardown RequestTypeInternal = 1 -) - -var RequestTypeInternal_name = map[int32]string{ - 0: "Setup", - 1: "Teardown", -} - -var RequestTypeInternal_value = map[string]int32{ - "Setup": 0, - "Teardown": 1, -} - -func (x RequestTypeInternal) String() string { - return proto.EnumName(RequestTypeInternal_name, int32(x)) -} - -func (RequestTypeInternal) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{0} -} - -type RegisterComputeAgentRequest struct { - AgentAddress string `protobuf:"bytes,1,opt,name=agent_address,json=agentAddress,proto3" json:"agent_address,omitempty"` - ContainerID string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RegisterComputeAgentRequest) Reset() { *m = RegisterComputeAgentRequest{} } -func (*RegisterComputeAgentRequest) ProtoMessage() {} -func (*RegisterComputeAgentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{0} -} -func (m *RegisterComputeAgentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegisterComputeAgentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegisterComputeAgentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegisterComputeAgentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterComputeAgentRequest.Merge(m, src) -} -func (m *RegisterComputeAgentRequest) XXX_Size() int { - return m.Size() -} -func (m 
*RegisterComputeAgentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterComputeAgentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RegisterComputeAgentRequest proto.InternalMessageInfo - -type RegisterComputeAgentResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RegisterComputeAgentResponse) Reset() { *m = RegisterComputeAgentResponse{} } -func (*RegisterComputeAgentResponse) ProtoMessage() {} -func (*RegisterComputeAgentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{1} -} -func (m *RegisterComputeAgentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegisterComputeAgentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegisterComputeAgentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegisterComputeAgentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterComputeAgentResponse.Merge(m, src) -} -func (m *RegisterComputeAgentResponse) XXX_Size() int { - return m.Size() -} -func (m *RegisterComputeAgentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterComputeAgentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RegisterComputeAgentResponse proto.InternalMessageInfo - -type UnregisterComputeAgentRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnregisterComputeAgentRequest) Reset() { *m = UnregisterComputeAgentRequest{} } -func (*UnregisterComputeAgentRequest) ProtoMessage() {} -func (*UnregisterComputeAgentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{2} -} -func (m *UnregisterComputeAgentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnregisterComputeAgentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnregisterComputeAgentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnregisterComputeAgentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnregisterComputeAgentRequest.Merge(m, src) -} -func (m *UnregisterComputeAgentRequest) XXX_Size() int { - return m.Size() -} -func (m *UnregisterComputeAgentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UnregisterComputeAgentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UnregisterComputeAgentRequest proto.InternalMessageInfo - -type UnregisterComputeAgentResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnregisterComputeAgentResponse) Reset() { *m = UnregisterComputeAgentResponse{} } -func (*UnregisterComputeAgentResponse) ProtoMessage() {} -func (*UnregisterComputeAgentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{3} -} -func (m *UnregisterComputeAgentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnregisterComputeAgentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
if deterministic { - return xxx_messageInfo_UnregisterComputeAgentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnregisterComputeAgentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnregisterComputeAgentResponse.Merge(m, src) -} -func (m *UnregisterComputeAgentResponse) XXX_Size() int { - return m.Size() -} -func (m *UnregisterComputeAgentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UnregisterComputeAgentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UnregisterComputeAgentResponse proto.InternalMessageInfo - -type ConfigureNetworkingInternalRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - RequestType RequestTypeInternal `protobuf:"varint,2,opt,name=request_type,json=requestType,proto3,enum=RequestTypeInternal" json:"request_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfigureNetworkingInternalRequest) Reset() { *m = ConfigureNetworkingInternalRequest{} } -func (*ConfigureNetworkingInternalRequest) ProtoMessage() {} -func (*ConfigureNetworkingInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{4} -} -func (m *ConfigureNetworkingInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigureNetworkingInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfigureNetworkingInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfigureNetworkingInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigureNetworkingInternalRequest.Merge(m, src) -} -func (m *ConfigureNetworkingInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *ConfigureNetworkingInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigureNetworkingInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfigureNetworkingInternalRequest proto.InternalMessageInfo - -type ConfigureNetworkingInternalResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfigureNetworkingInternalResponse) Reset() { *m = ConfigureNetworkingInternalResponse{} } -func (*ConfigureNetworkingInternalResponse) ProtoMessage() {} -func (*ConfigureNetworkingInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_11f9efc6dfbf9b45, []int{5} -} -func (m *ConfigureNetworkingInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigureNetworkingInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfigureNetworkingInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfigureNetworkingInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigureNetworkingInternalResponse.Merge(m, src) -} -func (m *ConfigureNetworkingInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *ConfigureNetworkingInternalResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_ConfigureNetworkingInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfigureNetworkingInternalResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("RequestTypeInternal", RequestTypeInternal_name, RequestTypeInternal_value) - proto.RegisterType((*RegisterComputeAgentRequest)(nil), "RegisterComputeAgentRequest") - proto.RegisterType((*RegisterComputeAgentResponse)(nil), "RegisterComputeAgentResponse") - proto.RegisterType((*UnregisterComputeAgentRequest)(nil), "UnregisterComputeAgentRequest") - proto.RegisterType((*UnregisterComputeAgentResponse)(nil), "UnregisterComputeAgentResponse") - proto.RegisterType((*ConfigureNetworkingInternalRequest)(nil), "ConfigureNetworkingInternalRequest") - proto.RegisterType((*ConfigureNetworkingInternalResponse)(nil), "ConfigureNetworkingInternalResponse") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/internal/ncproxyttrpc/networkconfigproxy.proto", fileDescriptor_11f9efc6dfbf9b45) -} - -var fileDescriptor_11f9efc6dfbf9b45 = []byte{ - // 436 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6b, 0x14, 0x41, - 0x10, 0x9d, 0x0e, 0x28, 0xa6, 0x76, 0xd5, 0xd0, 0x59, 0x24, 0x8c, 0x49, 0x27, 0xcc, 0x2a, 0x88, - 0x87, 0x1e, 0x58, 0x0f, 0x1e, 0xbc, 0x98, 0xac, 0x97, 0x45, 0x14, 0x99, 0x24, 0x20, 0xf1, 0xb0, - 0xcc, 0xce, 0x54, 0x66, 0x1b, 0xdd, 0xee, 0xb6, 0xbb, 0xc7, 0xb8, 0x37, 0xff, 0x82, 0xff, 0xc3, - 0x1f, 0x92, 0xa3, 0x47, 0x4f, 0x62, 0xe6, 0x97, 0xc8, 0x7c, 0x18, 0x45, 0xc6, 0x49, 0x30, 0xb7, - 0xae, 0x47, 0xf5, 0x7b, 0xc5, 0x7b, 0x55, 0xf0, 0x3c, 0x13, 0x6e, 0x9e, 0xcf, 0x78, 0xa2, 0x16, - 0xe1, 0x0b, 0x91, 0x18, 0x65, 0xd5, 0xb1, 0x0b, 0xe7, 0x89, 0xb5, 0x73, 0xb1, 0x08, 0x85, 0x74, - 0x68, 0x64, 0xfc, 0x2e, 0x94, 0x89, 0x36, 0xea, 0xe3, 0xd2, 0x39, 0xa3, 0x93, 0x50, 0xa2, 0x3b, - 0x51, 0xe6, 0x6d, 0xa2, 0xe4, 0xb1, 0xc8, 0x2a, 0x9c, 0x6b, 0xa3, 0x9c, 0xf2, 0x07, 0x99, 0xca, - 0x54, 0xf5, 0x0c, 0xcb, 0x57, 0x8d, 0x06, 0x1f, 0xe0, 0x6e, 0x84, 0x99, 0xb0, 0x0e, 0xcd, 0x58, - 0x2d, 0x74, 0xee, 0x70, 0x37, 0x43, 0xe9, 0x22, 0x7c, 0x9f, 0xa3, 0x75, 0x74, 0x08, 0x37, 0xe3, - 0xb2, 0x9e, 0xc6, 0x69, 0x6a, 0xd0, 0xda, 0x0d, 0xb2, 0x43, 0x1e, 0xac, 0x46, 0xfd, 0x0a, 0xdc, - 0xad, 0x31, 0x3a, 0x82, 0x7e, 0xa2, 0xa4, 0x8b, 0x85, 0x44, 0x33, 0x15, 0xe9, 0xc6, 0x4a, 0xd9, - 0xb3, 0x77, 0xbb, 0xf8, 0xbe, 0xdd, 0x1b, 0xff, 0xc2, 0x27, 0xcf, 0xa2, 0xde, 0x79, 0xd3, 0x24, - 0x0d, 0x18, 0x6c, 0xb6, 0xeb, 0x5a, 0xad, 0xa4, 0xc5, 0x60, 0x1f, 0xb6, 0x0e, 0xa5, 0xe9, 0x98, - 0xec, 0x6f, 0x51, 0x72, 0x09, 0xd1, 0x1d, 0x60, 0xff, 0x22, 0x6d, 0x64, 0x3f, 0x13, 0x08, 0xc6, - 0x95, 0x75, 0xb9, 0xc1, 0x97, 0xb5, 0x95, 0x42, 0x66, 0x93, 0xc6, 0xea, 0x2b, 0x88, 0xd3, 0xc7, - 0xd0, 0x37, 0xf5, 0xf7, 0xa9, 0x5b, 0x6a, 0xac, 0x5c, 0xba, 0x35, 0x1a, 0xf0, 0x86, 0xf3, 0x60, - 0xa9, 0xf1, 0x5c, 0xa6, 0x67, 0x7e, 0x83, 0xc1, 0x7d, 0x18, 0x76, 0x8e, 0x54, 0x8f, 0xfe, 0x90, - 0xc3, 0x7a, 0x0b, 0x15, 0x5d, 0x85, 0x6b, 0xfb, 0xe8, 0x72, 0xbd, 0xe6, 0xd1, 0x3e, 0xdc, 0x38, - 0xc0, 0xd8, 0xa4, 0xea, 0x44, 0xae, 0x91, 0xd1, 0x97, 0x15, 0xa0, 0x0d, 0x5d, 0x4d, 0xff, 0xaa, - 0x5c, 0x16, 0x7a, 0x08, 0x83, 0xb6, 0x60, 0xe8, 0x26, 0xef, 0xd8, 0x13, 0x7f, 0x8b, 0x77, 0xa6, - 0xe9, 0xd1, 0x37, 0x70, 0xa7, 0xdd, 0x7a, 0xca, 0x78, 0x67, 0xd0, 0xfe, 0x36, 0xbf, 0x20, 0x33, - 0x8f, 0xce, 0x60, 0xbd, 0xc5, 0x21, 0x3a, 0xe4, 0x17, 0x47, 0xe9, 0xdf, 0xe3, 0x97, 0x30, 0x37, - 0xf0, 0xf6, 0x8e, 0x4e, 0xcf, 0x98, 0xf7, 0xed, 0x8c, 0x79, 0x9f, 0x0a, 0x46, 0x4e, 0x0b, 0x46, - 
0xbe, 0x16, 0x8c, 0xfc, 0x28, 0x18, 0x39, 0x7a, 0xfa, 0x1f, 0x57, 0xfa, 0xe4, 0xcf, 0xea, 0xb5, - 0x37, 0xbb, 0x5e, 0x5d, 0xe3, 0xa3, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x95, 0xb3, 0x11, 0xd8, - 0xf2, 0x03, 0x00, 0x00, -} - -func (m *RegisterComputeAgentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegisterComputeAgentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RegisterComputeAgentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintNetworkconfigproxy(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0x12 - } - if len(m.AgentAddress) > 0 { - i -= len(m.AgentAddress) - copy(dAtA[i:], m.AgentAddress) - i = encodeVarintNetworkconfigproxy(dAtA, i, uint64(len(m.AgentAddress))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RegisterComputeAgentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegisterComputeAgentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RegisterComputeAgentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *UnregisterComputeAgentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnregisterComputeAgentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnregisterComputeAgentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintNetworkconfigproxy(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UnregisterComputeAgentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnregisterComputeAgentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnregisterComputeAgentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ConfigureNetworkingInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfigureNetworkingInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigureNetworkingInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RequestType != 0 { - i = encodeVarintNetworkconfigproxy(dAtA, i, uint64(m.RequestType)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintNetworkconfigproxy(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ConfigureNetworkingInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfigureNetworkingInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigureNetworkingInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func encodeVarintNetworkconfigproxy(dAtA []byte, offset int, v uint64) int { - offset -= sovNetworkconfigproxy(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RegisterComputeAgentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.AgentAddress) - if l > 0 { - n += 1 + l + sovNetworkconfigproxy(uint64(l)) - } - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovNetworkconfigproxy(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RegisterComputeAgentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UnregisterComputeAgentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovNetworkconfigproxy(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UnregisterComputeAgentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfigureNetworkingInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovNetworkconfigproxy(uint64(l)) - } - if m.RequestType != 0 { - n += 1 + sovNetworkconfigproxy(uint64(m.RequestType)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfigureNetworkingInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovNetworkconfigproxy(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNetworkconfigproxy(x uint64) 
(n int) { - return sovNetworkconfigproxy(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RegisterComputeAgentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegisterComputeAgentRequest{`, - `AgentAddress:` + fmt.Sprintf("%v", this.AgentAddress) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RegisterComputeAgentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegisterComputeAgentResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UnregisterComputeAgentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnregisterComputeAgentRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UnregisterComputeAgentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnregisterComputeAgentResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigureNetworkingInternalRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigureNetworkingInternalRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `RequestType:` + fmt.Sprintf("%v", this.RequestType) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigureNetworkingInternalResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigureNetworkingInternalResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringNetworkconfigproxy(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type NetworkConfigProxyService interface { - RegisterComputeAgent(ctx context.Context, req *RegisterComputeAgentRequest) (*RegisterComputeAgentResponse, error) - UnregisterComputeAgent(ctx context.Context, req *UnregisterComputeAgentRequest) (*UnregisterComputeAgentResponse, error) - ConfigureNetworking(ctx context.Context, req *ConfigureNetworkingInternalRequest) (*ConfigureNetworkingInternalResponse, error) -} - -func RegisterNetworkConfigProxyService(srv *github_com_containerd_ttrpc.Server, svc NetworkConfigProxyService) { - srv.Register("NetworkConfigProxy", map[string]github_com_containerd_ttrpc.Method{ - "RegisterComputeAgent": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req RegisterComputeAgentRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.RegisterComputeAgent(ctx, &req) - }, - "UnregisterComputeAgent": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req UnregisterComputeAgentRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.UnregisterComputeAgent(ctx, &req) - }, - "ConfigureNetworking": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ConfigureNetworkingInternalRequest - if err := unmarshal(&req); err 
!= nil { - return nil, err - } - return svc.ConfigureNetworking(ctx, &req) - }, - }) -} - -type networkConfigProxyClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewNetworkConfigProxyClient(client *github_com_containerd_ttrpc.Client) NetworkConfigProxyService { - return &networkConfigProxyClient{ - client: client, - } -} - -func (c *networkConfigProxyClient) RegisterComputeAgent(ctx context.Context, req *RegisterComputeAgentRequest) (*RegisterComputeAgentResponse, error) { - var resp RegisterComputeAgentResponse - if err := c.client.Call(ctx, "NetworkConfigProxy", "RegisterComputeAgent", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *networkConfigProxyClient) UnregisterComputeAgent(ctx context.Context, req *UnregisterComputeAgentRequest) (*UnregisterComputeAgentResponse, error) { - var resp UnregisterComputeAgentResponse - if err := c.client.Call(ctx, "NetworkConfigProxy", "UnregisterComputeAgent", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *networkConfigProxyClient) ConfigureNetworking(ctx context.Context, req *ConfigureNetworkingInternalRequest) (*ConfigureNetworkingInternalResponse, error) { - var resp ConfigureNetworkingInternalResponse - if err := c.client.Call(ctx, "NetworkConfigProxy", "ConfigureNetworking", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *RegisterComputeAgentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegisterComputeAgentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegisterComputeAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegisterComputeAgentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegisterComputeAgentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegisterComputeAgentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnregisterComputeAgentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnregisterComputeAgentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnregisterComputeAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnregisterComputeAgentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnregisterComputeAgentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnregisterComputeAgentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigureNetworkingInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigureNetworkingInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigureNetworkingInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestType", wireType) - } - m.RequestType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RequestType |= RequestTypeInternal(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigureNetworkingInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigureNetworkingInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigureNetworkingInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNetworkconfigproxy(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNetworkconfigproxy - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNetworkconfigproxy(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNetworkconfigproxy - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNetworkconfigproxy - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNetworkconfigproxy - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNetworkconfigproxy - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthNetworkconfigproxy = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNetworkconfigproxy = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNetworkconfigproxy = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go deleted file mode 100644 index 0e2b7e9bf6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ /dev/null @@ -1,48 +0,0 @@ -package oc - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/log" - "go.opencensus.io/trace" -) - -var DefaultSampler = trace.AlwaysSample() - -// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. 
If -// `err` is `nil` assumes `trace.StatusCodeOk`. -func SetSpanStatus(span *trace.Span, err error) { - status := trace.Status{} - if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown - status.Message = err.Error() - } - span.SetStatus(status) -} - -// StartSpan wraps "go.opencensus.io/trace".StartSpan, but, if the span is sampling, -// adds a log entry to the context that points to the newly created span. -func StartSpan(ctx context.Context, name string, o ...trace.StartOption) (context.Context, *trace.Span) { - ctx, s := trace.StartSpan(ctx, name, o...) - return update(ctx, s) -} - -// StartSpanWithRemoteParent wraps "go.opencensus.io/trace".StartSpanWithRemoteParent. -// -// See StartSpan for more information. -func StartSpanWithRemoteParent(ctx context.Context, name string, parent trace.SpanContext, o ...trace.StartOption) (context.Context, *trace.Span) { - ctx, s := trace.StartSpanWithRemoteParent(ctx, name, parent, o...) - return update(ctx, s) -} - -func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { - if s.IsRecordingEvents() { - ctx = log.UpdateContext(ctx) - } - - return ctx, s -} - -var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) -var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go deleted file mode 100644 index d7274c0362..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go +++ /dev/null @@ -1,150 +0,0 @@ -package oci - -import ( - "context" - "errors" - "strconv" - "strings" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/pkg/annotations" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -var ErrAnnotationExpansionConflict = errors.New("annotation expansion conflict") - -// ProcessAnnotations expands annotations into their corresponding annotation groups -func ProcessAnnotations(ctx context.Context, s *specs.Spec) (err error) { - // Named `Process` and not `Expand` since this function may be expanded (pun intended) to - // deal with other annotation issues and validation. - - // Rathen than give up part of the way through on error, this just emits a warning (similar - // to the `parseAnnotation*` functions) and continues through, so the spec is not left in a - // (partially) unusable form. - // If multiple different errors are to be raised, they should be combined or, if they - // are logged, only the last kept, depending on their severity. 
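The expansion rule described above (copy the group value onto each member key, but warn rather than overwrite a conflicting value) is easy to lose in the logging noise; a minimal standalone sketch of the same rule, using plain maps and hypothetical annotation keys rather than the hcsshim types:

    package main

    import "fmt"

    // expand copies the value of a group annotation onto each member key,
    // reporting a conflict instead of overwriting an existing, different value.
    func expand(annots map[string]string, group string, members []string) {
        val, ok := annots[group]
        if !ok {
            return
        }
        for _, k := range members {
            if v, ok := annots[k]; ok && v != val {
                fmt.Printf("conflict: %s=%q vs %s=%q\n", group, val, k, v)
                continue
            }
            annots[k] = val
        }
    }

    func main() {
        a := map[string]string{
            "io.example.group":   "true",
            "io.example.member2": "false", // pre-set and conflicting: reported, not overwritten
        }
        expand(a, "io.example.group", []string{"io.example.member1", "io.example.member2"})
        fmt.Println(a["io.example.member1"], a["io.example.member2"]) // true false
    }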
- - // expand annotations - for key, exps := range annotations.AnnotationExpansions { - // check if annotation is present - if val, ok := s.Annotations[key]; ok { - // ideally, some normalization would occur here (ie, "True" -> "true") - // but strings may be case-sensitive - for _, k := range exps { - if v, ok := s.Annotations[k]; ok && val != v { - err = ErrAnnotationExpansionConflict - log.G(ctx).WithFields(logrus.Fields{ - logfields.OCIAnnotation: key, - logfields.Value: val, - logfields.OCIAnnotation + "-conflict": k, - logfields.Value + "-conflict": v, - }).WithError(err).Warning("annotation expansion would overwrite conflicting value") - continue - } - s.Annotations[k] = val - } - } - } - - return err -} - -// handle specific annotations - -// ParseAnnotationsDisableGMSA searches for the boolean value which specifies -// if providing a gMSA credential should be disallowed. Returns the value found, -// if parsable, otherwise returns false otherwise. -func ParseAnnotationsDisableGMSA(ctx context.Context, s *specs.Spec) bool { - return parseAnnotationsBool(ctx, s.Annotations, annotations.WCOWDisableGMSA, false) -} - -// ParseAnnotationsSaveAsTemplate searches for the boolean value which specifies -// if this create request should be considered as a template creation request. If value -// is found the returns the actual value, returns false otherwise. -func ParseAnnotationsSaveAsTemplate(ctx context.Context, s *specs.Spec) bool { - return parseAnnotationsBool(ctx, s.Annotations, annotations.SaveAsTemplate, false) -} - -// ParseAnnotationsTemplateID searches for the templateID in the create request. If the -// value is found then returns the value otherwise returns the empty string. -func ParseAnnotationsTemplateID(ctx context.Context, s *specs.Spec) string { - return parseAnnotationsString(s.Annotations, annotations.TemplateID, "") -} - -// general annotation parsing - -// parseAnnotationsBool searches `a` for `key` and if found verifies that the -// value is `true` or `false` in any case. If `key` is not found returns `def`. -func parseAnnotationsBool(ctx context.Context, a map[string]string, key string, def bool) bool { - if v, ok := a[key]; ok { - switch strings.ToLower(v) { - case "true": - return true - case "false": - return false - default: - logAnnotationParseError(ctx, key, v, logfields.Bool, nil) - } - } - return def -} - -// parseAnnotationsUint32 searches `a` for `key` and if found verifies that the -// value is a 32 bit unsigned integer. If `key` is not found returns `def`. -func parseAnnotationsUint32(ctx context.Context, a map[string]string, key string, def uint32) uint32 { - if v, ok := a[key]; ok { - countu, err := strconv.ParseUint(v, 10, 32) - if err == nil { - v := uint32(countu) - return v - } - logAnnotationParseError(ctx, key, v, logfields.Uint32, err) - } - return def -} - -// parseAnnotationsUint64 searches `a` for `key` and if found verifies that the -// value is a 64 bit unsigned integer. If `key` is not found returns `def`. -func parseAnnotationsUint64(ctx context.Context, a map[string]string, key string, def uint64) uint64 { - if v, ok := a[key]; ok { - countu, err := strconv.ParseUint(v, 10, 64) - if err == nil { - return countu - } - logAnnotationParseError(ctx, key, v, logfields.Uint64, err) - } - return def -} - -// parseAnnotationsString searches `a` for `key`. If `key` is not found returns `def`. 
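The boolean helper above accepts only "true" or "false" in any case and otherwise keeps the caller's default; a standalone sketch of that rule with a hypothetical key and no hcsshim imports:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseBool mirrors the rule above: accept "true"/"false" in any case,
    // otherwise fall back to the supplied default.
    func parseBool(annots map[string]string, key string, def bool) bool {
        v, ok := annots[key]
        if !ok {
            return def
        }
        switch strings.ToLower(v) {
        case "true":
            return true
        case "false":
            return false
        }
        return def
    }

    func main() {
        a := map[string]string{"io.example.flag": "True"}
        fmt.Println(parseBool(a, "io.example.flag", false))   // true
        fmt.Println(parseBool(a, "io.example.missing", true)) // true (default kept)
    }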
-func parseAnnotationsString(a map[string]string, key string, def string) string { - if v, ok := a[key]; ok { - return v - } - return def -} - -// ParseAnnotationCommaSeparated searches `annotations` for `annotation` corresponding to a -// list of comma separated strings -func ParseAnnotationCommaSeparated(annotation string, annotations map[string]string) []string { - cs, ok := annotations[annotation] - if !ok || cs == "" { - return nil - } - results := strings.Split(cs, ",") - return results -} - -func logAnnotationParseError(ctx context.Context, k, v, et string, err error) { - entry := log.G(ctx).WithFields(logrus.Fields{ - logfields.OCIAnnotation: k, - logfields.Value: v, - logfields.ExpectedType: et, - }) - if err != nil { - entry = entry.WithError(err) - } - entry.Warning("annotation could not be parsed") -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/sandbox.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/sandbox.go deleted file mode 100644 index 3b9064d671..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/sandbox.go +++ /dev/null @@ -1,54 +0,0 @@ -package oci - -import ( - "fmt" - - "github.com/Microsoft/hcsshim/pkg/annotations" -) - -// KubernetesContainerType defines the valid types of the -// `annotations.KubernetesContainerType` annotation. -type KubernetesContainerType string - -const ( - // KubernetesContainerTypeNone is only valid when - // `annotations.KubernetesContainerType` is not set. - KubernetesContainerTypeNone KubernetesContainerType = "" - // KubernetesContainerTypeContainer is valid when - // `annotations.KubernetesContainerType == "container"`. - KubernetesContainerTypeContainer KubernetesContainerType = "container" - // KubernetesContainerTypeSandbox is valid when - // `annotations.KubernetesContainerType == "sandbox"`. - KubernetesContainerTypeSandbox KubernetesContainerType = "sandbox" -) - -// GetSandboxTypeAndID parses `specAnnotations` searching for the -// `KubernetesContainerTypeAnnotation` and `KubernetesSandboxIDAnnotation` -// annotations and if found validates the set before returning. 
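A rough usage sketch of that validation, with hypothetical annotation values and assuming a caller inside the hcsshim module on Windows (the internal import path only resolves there):

    //go:build windows

    package main

    import (
        "fmt"
        "log"

        "github.com/Microsoft/hcsshim/internal/oci"
        "github.com/Microsoft/hcsshim/pkg/annotations"
    )

    func main() {
        specAnnotations := map[string]string{
            annotations.KubernetesContainerType: "container",
            annotations.KubernetesSandboxID:     "sandbox-1234", // hypothetical ID
        }
        ct, id, err := oci.GetSandboxTypeAndID(specAnnotations)
        if err != nil {
            // e.g. a "container" type without a sandbox ID is rejected
            log.Fatal(err)
        }
        fmt.Println(ct, id) // container sandbox-1234
    }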
-func GetSandboxTypeAndID(specAnnotations map[string]string) (KubernetesContainerType, string, error) { - var ct KubernetesContainerType - if t, ok := specAnnotations[annotations.KubernetesContainerType]; ok { - switch t { - case string(KubernetesContainerTypeContainer): - ct = KubernetesContainerTypeContainer - case string(KubernetesContainerTypeSandbox): - ct = KubernetesContainerTypeSandbox - default: - return KubernetesContainerTypeNone, "", fmt.Errorf("invalid '%s': '%s'", annotations.KubernetesContainerType, t) - } - } - - id := specAnnotations[annotations.KubernetesSandboxID] - - switch ct { - case KubernetesContainerTypeContainer, KubernetesContainerTypeSandbox: - if id == "" { - return KubernetesContainerTypeNone, "", fmt.Errorf("cannot specify '%s' without '%s'", annotations.KubernetesContainerType, annotations.KubernetesSandboxID) - } - default: - if id != "" { - return KubernetesContainerTypeNone, "", fmt.Errorf("cannot specify '%s' without '%s'", annotations.KubernetesSandboxID, annotations.KubernetesContainerType) - } - } - return ct, id, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go deleted file mode 100644 index 4a624639f8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go +++ /dev/null @@ -1,347 +0,0 @@ -//go:build windows - -package oci - -import ( - "context" - "errors" - "fmt" - "strconv" - - runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" - "github.com/Microsoft/hcsshim/internal/clone" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/pkg/annotations" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -// UVM specific annotation parsing - -// ParseAnnotationsCPUCount searches `s.Annotations` for the CPU annotation. If -// not found searches `s` for the Windows CPU section. If neither are found -// returns `def`. -func ParseAnnotationsCPUCount(ctx context.Context, s *specs.Spec, annotation string, def int32) int32 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return int32(m) - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.CPU != nil && - s.Windows.Resources.CPU.Count != nil && - *s.Windows.Resources.CPU.Count > 0 { - return int32(*s.Windows.Resources.CPU.Count) - } - return def -} - -// ParseAnnotationsCPULimit searches `s.Annotations` for the CPU annotation. If -// not found searches `s` for the Windows CPU section. If neither are found -// returns `def`. -func ParseAnnotationsCPULimit(ctx context.Context, s *specs.Spec, annotation string, def int32) int32 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return int32(m) - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.CPU != nil && - s.Windows.Resources.CPU.Maximum != nil && - *s.Windows.Resources.CPU.Maximum > 0 { - return int32(*s.Windows.Resources.CPU.Maximum) - } - return def -} - -// ParseAnnotationsCPUWeight searches `s.Annotations` for the CPU annotation. If -// not found searches `s` for the Windows CPU section. If neither are found -// returns `def`. 
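All three CPU parsers share this annotation-first, Windows-section-second precedence; a hedged sketch of calling the count variant (hypothetical values, Windows-only build, caller assumed inside the hcsshim module):

    //go:build windows

    package main

    import (
        "context"
        "fmt"

        "github.com/Microsoft/hcsshim/internal/oci"
        "github.com/Microsoft/hcsshim/pkg/annotations"
        specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
        s := &specs.Spec{Annotations: map[string]string{
            annotations.ProcessorCount: "2", // annotation wins over spec.Windows.Resources.CPU.Count
        }}
        count := oci.ParseAnnotationsCPUCount(context.Background(), s, annotations.ProcessorCount, 4)
        fmt.Println(count) // 2; with neither annotation nor Windows CPU section set, the default 4 is returned
    }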
-func ParseAnnotationsCPUWeight(ctx context.Context, s *specs.Spec, annotation string, def int32) int32 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return int32(m) - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.CPU != nil && - s.Windows.Resources.CPU.Shares != nil && - *s.Windows.Resources.CPU.Shares > 0 { - return int32(*s.Windows.Resources.CPU.Shares) - } - return def -} - -// ParseAnnotationsStorageIops searches `s.Annotations` for the `Iops` -// annotation. If not found searches `s` for the Windows Storage section. If -// neither are found returns `def`. -func ParseAnnotationsStorageIops(ctx context.Context, s *specs.Spec, annotation string, def int32) int32 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return int32(m) - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.Storage != nil && - s.Windows.Resources.Storage.Iops != nil && - *s.Windows.Resources.Storage.Iops > 0 { - return int32(*s.Windows.Resources.Storage.Iops) - } - return def -} - -// ParseAnnotationsStorageBps searches `s.Annotations` for the `Bps` annotation. -// If not found searches `s` for the Windows Storage section. If neither are -// found returns `def`. -func ParseAnnotationsStorageBps(ctx context.Context, s *specs.Spec, annotation string, def int32) int32 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return int32(m) - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.Storage != nil && - s.Windows.Resources.Storage.Bps != nil && - *s.Windows.Resources.Storage.Bps > 0 { - return int32(*s.Windows.Resources.Storage.Bps) - } - return def -} - -// ParseAnnotationsMemory searches `s.Annotations` for the memory annotation. If -// not found searches `s` for the Windows memory section. If neither are found -// returns `def`. -// -// Note: The returned value is in `MB`. -func ParseAnnotationsMemory(ctx context.Context, s *specs.Spec, annotation string, def uint64) uint64 { - if m := parseAnnotationsUint64(ctx, s.Annotations, annotation, 0); m != 0 { - return m - } - if s.Windows != nil && - s.Windows.Resources != nil && - s.Windows.Resources.Memory != nil && - s.Windows.Resources.Memory.Limit != nil && - *s.Windows.Resources.Memory.Limit > 0 { - return (*s.Windows.Resources.Memory.Limit / 1024 / 1024) - } - return def -} - -// parseAnnotationsPreferredRootFSType searches `a` for `key` and verifies that the -// value is in the set of allowed values. If `key` is not found returns `def`. 
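One unit detail from the parsers above that is easy to trip over: ParseAnnotationsMemory returns megabytes while spec.Windows.Resources.Memory.Limit stores bytes. A trivial standalone check of that conversion:

    package main

    import "fmt"

    func main() {
        // The Windows memory limit is in bytes; the parser divides twice by 1024.
        limitBytes := uint64(2 * 1024 * 1024 * 1024) // 2 GiB
        fmt.Println(limitBytes / 1024 / 1024)        // 2048 (MB), the value ParseAnnotationsMemory would return
    }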
-func parseAnnotationsPreferredRootFSType(ctx context.Context, a map[string]string, key string, def uvm.PreferredRootFSType) uvm.PreferredRootFSType { - if v, ok := a[key]; ok { - switch v { - case "initrd": - return uvm.PreferredRootFSTypeInitRd - case "vhd": - return uvm.PreferredRootFSTypeVHD - default: - log.G(ctx).WithFields(logrus.Fields{ - "annotation": key, - "value": v, - }).Warn("annotation value must be 'initrd' or 'vhd'") - } - } - return def -} - -func ParseCloneAnnotations(ctx context.Context, s *specs.Spec) (isTemplate bool, templateID string, err error) { - templateID = ParseAnnotationsTemplateID(ctx, s) - isTemplate = ParseAnnotationsSaveAsTemplate(ctx, s) - if templateID != "" && isTemplate { - return false, "", fmt.Errorf("templateID and save as template flags can not be passed in the same request") - } - - if (isTemplate || templateID != "") && !IsWCOW(s) { - return false, "", fmt.Errorf("save as template and creating clones is only available for WCOW") - } - return -} - -// handleAnnotationKernelDirectBoot handles parsing annotationKernelDirectBoot and setting -// implied annotations from the result. -func handleAnnotationKernelDirectBoot(ctx context.Context, a map[string]string, lopts *uvm.OptionsLCOW) { - lopts.KernelDirect = parseAnnotationsBool(ctx, a, annotations.KernelDirectBoot, lopts.KernelDirect) - if !lopts.KernelDirect { - lopts.KernelFile = uvm.KernelFile - } -} - -// handleAnnotationPreferredRootFSType handles parsing annotationPreferredRootFSType and setting -// implied annotations from the result -func handleAnnotationPreferredRootFSType(ctx context.Context, a map[string]string, lopts *uvm.OptionsLCOW) { - lopts.PreferredRootFSType = parseAnnotationsPreferredRootFSType(ctx, a, annotations.PreferredRootFSType, lopts.PreferredRootFSType) - switch lopts.PreferredRootFSType { - case uvm.PreferredRootFSTypeInitRd: - lopts.RootFSFile = uvm.InitrdFile - case uvm.PreferredRootFSTypeVHD: - lopts.RootFSFile = uvm.VhdFile - } -} - -// handleAnnotationFullyPhysicallyBacked handles parsing annotationFullyPhysicallyBacked and setting -// implied annotations from the result. For both LCOW and WCOW options. -func handleAnnotationFullyPhysicallyBacked(ctx context.Context, a map[string]string, opts interface{}) { - switch options := opts.(type) { - case *uvm.OptionsLCOW: - options.FullyPhysicallyBacked = parseAnnotationsBool(ctx, a, annotations.FullyPhysicallyBacked, options.FullyPhysicallyBacked) - if options.FullyPhysicallyBacked { - options.AllowOvercommit = false - options.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd - options.RootFSFile = uvm.InitrdFile - options.VPMemDeviceCount = 0 - } - case *uvm.OptionsWCOW: - options.FullyPhysicallyBacked = parseAnnotationsBool(ctx, a, annotations.FullyPhysicallyBacked, options.FullyPhysicallyBacked) - if options.FullyPhysicallyBacked { - options.AllowOvercommit = false - } - } -} - -// handleCloneAnnotations handles parsing annotations related to template creation and cloning -// Since late cloning is only supported for WCOW this function only deals with WCOW options. 
-func handleCloneAnnotations(ctx context.Context, a map[string]string, wopts *uvm.OptionsWCOW) (err error) { - wopts.IsTemplate = parseAnnotationsBool(ctx, a, annotations.SaveAsTemplate, false) - templateID := parseAnnotationsString(a, annotations.TemplateID, "") - if templateID != "" { - tc, err := clone.FetchTemplateConfig(ctx, templateID) - if err != nil { - return err - } - wopts.TemplateConfig = &uvm.UVMTemplateConfig{ - UVMID: tc.TemplateUVMID, - CreateOpts: tc.TemplateUVMCreateOpts, - Resources: tc.TemplateUVMResources, - } - wopts.IsClone = true - } - return nil -} - -// handleSecurityPolicy handles parsing SecurityPolicy and NoSecurityHardware and setting -// implied options from the results. Both LCOW only, not WCOW -func handleSecurityPolicy(ctx context.Context, a map[string]string, lopts *uvm.OptionsLCOW) { - lopts.SecurityPolicy = parseAnnotationsString(a, annotations.SecurityPolicy, lopts.SecurityPolicy) - // allow actual isolated boot etc to be ignored if we have no hardware. Required for dev - // this is not a security issue as the attestation will fail without a genuine report - noSecurityHardware := parseAnnotationsBool(ctx, a, annotations.NoSecurityHardware, false) - - // if there is a security policy (and SNP) we currently boot in a way that doesn't support any boot options - // this might change if the building of the vmgs file were to be done on demand but that is likely - // much slower and noy very useful. We do respect the filename of the vmgs file so if it is necessary to - // have different options then multiple files could be used. - if len(lopts.SecurityPolicy) > 0 && !noSecurityHardware { - // VPMem not supported by the enlightened kernel for SNP so set count to zero. - lopts.VPMemDeviceCount = 0 - // set the default GuestState filename. 
- lopts.GuestStateFile = uvm.GuestStateFile - lopts.KernelBootOptions = "" - lopts.PreferredRootFSType = uvm.PreferredRootFSTypeNA - lopts.AllowOvercommit = false - lopts.SecurityPolicyEnabled = true - } -} - -// sets options common to both WCOW and LCOW from annotations -func specToUVMCreateOptionsCommon(ctx context.Context, opts *uvm.Options, s *specs.Spec) { - opts.MemorySizeInMB = ParseAnnotationsMemory(ctx, s, annotations.MemorySizeInMB, opts.MemorySizeInMB) - opts.LowMMIOGapInMB = parseAnnotationsUint64(ctx, s.Annotations, annotations.MemoryLowMMIOGapInMB, opts.LowMMIOGapInMB) - opts.HighMMIOBaseInMB = parseAnnotationsUint64(ctx, s.Annotations, annotations.MemoryHighMMIOBaseInMB, opts.HighMMIOBaseInMB) - opts.HighMMIOGapInMB = parseAnnotationsUint64(ctx, s.Annotations, annotations.MemoryHighMMIOGapInMB, opts.HighMMIOGapInMB) - opts.AllowOvercommit = parseAnnotationsBool(ctx, s.Annotations, annotations.AllowOvercommit, opts.AllowOvercommit) - opts.EnableDeferredCommit = parseAnnotationsBool(ctx, s.Annotations, annotations.EnableDeferredCommit, opts.EnableDeferredCommit) - opts.ProcessorCount = ParseAnnotationsCPUCount(ctx, s, annotations.ProcessorCount, opts.ProcessorCount) - opts.ProcessorLimit = ParseAnnotationsCPULimit(ctx, s, annotations.ProcessorLimit, opts.ProcessorLimit) - opts.ProcessorWeight = ParseAnnotationsCPUWeight(ctx, s, annotations.ProcessorWeight, opts.ProcessorWeight) - opts.StorageQoSBandwidthMaximum = ParseAnnotationsStorageBps(ctx, s, annotations.StorageQoSBandwidthMaximum, opts.StorageQoSBandwidthMaximum) - opts.StorageQoSIopsMaximum = ParseAnnotationsStorageIops(ctx, s, annotations.StorageQoSIopsMaximum, opts.StorageQoSIopsMaximum) - opts.CPUGroupID = parseAnnotationsString(s.Annotations, annotations.CPUGroupID, opts.CPUGroupID) - opts.NetworkConfigProxy = parseAnnotationsString(s.Annotations, annotations.NetworkConfigProxy, opts.NetworkConfigProxy) - opts.ProcessDumpLocation = parseAnnotationsString(s.Annotations, annotations.ContainerProcessDumpLocation, opts.ProcessDumpLocation) - opts.NoWritableFileShares = parseAnnotationsBool(ctx, s.Annotations, annotations.DisableWritableFileShares, opts.NoWritableFileShares) -} - -// SpecToUVMCreateOpts parses `s` and returns either `*uvm.OptionsLCOW` or -// `*uvm.OptionsWCOW`. 
-func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) (interface{}, error) { - if !IsIsolated(s) { - return nil, errors.New("cannot create UVM opts for non-isolated spec") - } - if IsLCOW(s) { - lopts := uvm.NewDefaultOptionsLCOW(id, owner) - specToUVMCreateOptionsCommon(ctx, lopts.Options, s) - - lopts.EnableColdDiscardHint = parseAnnotationsBool(ctx, s.Annotations, annotations.EnableColdDiscardHint, lopts.EnableColdDiscardHint) - lopts.VPMemDeviceCount = parseAnnotationsUint32(ctx, s.Annotations, annotations.VPMemCount, lopts.VPMemDeviceCount) - lopts.VPMemSizeBytes = parseAnnotationsUint64(ctx, s.Annotations, annotations.VPMemSize, lopts.VPMemSizeBytes) - lopts.VPMemNoMultiMapping = parseAnnotationsBool(ctx, s.Annotations, annotations.VPMemNoMultiMapping, lopts.VPMemNoMultiMapping) - lopts.VPCIEnabled = parseAnnotationsBool(ctx, s.Annotations, annotations.VPCIEnabled, lopts.VPCIEnabled) - lopts.BootFilesPath = parseAnnotationsString(s.Annotations, annotations.BootFilesRootPath, lopts.BootFilesPath) - lopts.EnableScratchEncryption = parseAnnotationsBool(ctx, s.Annotations, annotations.EncryptedScratchDisk, lopts.EnableScratchEncryption) - lopts.SecurityPolicy = parseAnnotationsString(s.Annotations, annotations.SecurityPolicy, lopts.SecurityPolicy) - lopts.KernelBootOptions = parseAnnotationsString(s.Annotations, annotations.KernelBootOptions, lopts.KernelBootOptions) - lopts.DisableTimeSyncService = parseAnnotationsBool(ctx, s.Annotations, annotations.DisableLCOWTimeSyncService, lopts.DisableTimeSyncService) - handleAnnotationPreferredRootFSType(ctx, s.Annotations, lopts) - handleAnnotationKernelDirectBoot(ctx, s.Annotations, lopts) - - // parsing of FullyPhysicallyBacked needs to go after handling kernel direct boot and - // preferred rootfs type since it may overwrite settings created by those - handleAnnotationFullyPhysicallyBacked(ctx, s.Annotations, lopts) - - // SecurityPolicy is very sensitive to other settings and will silently change those that are incompatible. - // Eg VMPem device count, overridden kernel option cannot be respected. - handleSecurityPolicy(ctx, s.Annotations, lopts) - - // override the default GuestState filename if specified - lopts.GuestStateFile = parseAnnotationsString(s.Annotations, annotations.GuestStateFile, lopts.GuestStateFile) - return lopts, nil - } else if IsWCOW(s) { - wopts := uvm.NewDefaultOptionsWCOW(id, owner) - specToUVMCreateOptionsCommon(ctx, wopts.Options, s) - - wopts.DisableCompartmentNamespace = parseAnnotationsBool(ctx, s.Annotations, annotations.DisableCompartmentNamespace, wopts.DisableCompartmentNamespace) - wopts.NoDirectMap = parseAnnotationsBool(ctx, s.Annotations, annotations.VSMBNoDirectMap, wopts.NoDirectMap) - wopts.NoInheritHostTimezone = parseAnnotationsBool(ctx, s.Annotations, annotations.NoInheritHostTimezone, wopts.NoInheritHostTimezone) - handleAnnotationFullyPhysicallyBacked(ctx, s.Annotations, wopts) - if err := handleCloneAnnotations(ctx, s.Annotations, wopts); err != nil { - return nil, err - } - return wopts, nil - } - return nil, errors.New("cannot create UVM opts spec is not LCOW or WCOW") -} - -// UpdateSpecFromOptions sets extra annotations on the OCI spec based on the -// `opts` struct. 
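Since SpecToUVMCreateOpts above returns interface{}, callers type-switch on the result; a rough sketch of that pattern (hypothetical id and owner strings, Windows-only, and the bare spec in main only exercises the non-isolated error path):

    //go:build windows

    package main

    import (
        "context"
        "log"

        "github.com/Microsoft/hcsshim/internal/oci"
        "github.com/Microsoft/hcsshim/internal/uvm"
        specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    // createOptsFor shows the type switch callers need on the interface{} result.
    func createOptsFor(ctx context.Context, s *specs.Spec) {
        opts, err := oci.SpecToUVMCreateOpts(ctx, s, "uvm-id", "example-owner") // hypothetical id/owner
        if err != nil {
            log.Fatal(err) // non-isolated specs are rejected
        }
        switch o := opts.(type) {
        case *uvm.OptionsLCOW:
            log.Printf("LCOW UVM, rootfs file: %s", o.RootFSFile)
        case *uvm.OptionsWCOW:
            log.Printf("WCOW UVM, clone: %v", o.IsClone)
        }
    }

    func main() {
        // A bare spec is not isolated, so this only demonstrates the error path;
        // in practice the spec comes from the shim's create request.
        createOptsFor(context.Background(), &specs.Spec{})
    }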
-func UpdateSpecFromOptions(s specs.Spec, opts *runhcsopts.Options) specs.Spec { - if opts == nil { - return s - } - - if _, ok := s.Annotations[annotations.BootFilesRootPath]; !ok && opts.BootFilesRootPath != "" { - s.Annotations[annotations.BootFilesRootPath] = opts.BootFilesRootPath - } - - if _, ok := s.Annotations[annotations.ProcessorCount]; !ok && opts.VmProcessorCount != 0 { - s.Annotations[annotations.ProcessorCount] = strconv.FormatInt(int64(opts.VmProcessorCount), 10) - } - - if _, ok := s.Annotations[annotations.MemorySizeInMB]; !ok && opts.VmMemorySizeInMb != 0 { - s.Annotations[annotations.MemorySizeInMB] = strconv.FormatInt(int64(opts.VmMemorySizeInMb), 10) - } - - if _, ok := s.Annotations[annotations.GPUVHDPath]; !ok && opts.GPUVHDPath != "" { - s.Annotations[annotations.GPUVHDPath] = opts.GPUVHDPath - } - - if _, ok := s.Annotations[annotations.NetworkConfigProxy]; !ok && opts.NCProxyAddr != "" { - s.Annotations[annotations.NetworkConfigProxy] = opts.NCProxyAddr - } - - for key, value := range opts.DefaultContainerAnnotations { - // Make sure not to override any annotations which are set explicitly - if _, ok := s.Annotations[key]; !ok { - s.Annotations[key] = value - } - } - - return s -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/doc.go deleted file mode 100644 index dd2a53b5c6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/doc.go +++ /dev/null @@ -1 +0,0 @@ -package processorinfo diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/host_information.go b/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/host_information.go deleted file mode 100644 index 0aa766a43e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/host_information.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build windows - -package processorinfo - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// HostProcessorInfo queries HCS for the host's processor information, including topology -// and NUMA configuration. This is also used to reliably get the hosts number of logical -// processors in multi processor group settings. 
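A short usage sketch of the two entry points in this package, assuming a Windows host with HCS available and a caller inside the hcsshim module:

    //go:build windows

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/Microsoft/hcsshim/internal/processorinfo"
    )

    func main() {
        // Total logical processors across all processor groups
        // (falls back to runtime.NumCPU if the win32 call returns 0).
        fmt.Println("logical processors:", processorinfo.ProcessorCount())

        // Full topology as reported by HCS.
        topo, err := processorinfo.HostProcessorInfo(context.Background())
        if err != nil {
            log.Fatalf("querying HCS: %v", err)
        }
        fmt.Printf("topology: %+v\n", topo)
    }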
-func HostProcessorInfo(ctx context.Context) (*hcsschema.ProcessorTopology, error) { - q := hcsschema.PropertyQuery{ - PropertyTypes: []hcsschema.PropertyType{hcsschema.PTProcessorTopology}, - } - serviceProps, err := hcs.GetServiceProperties(ctx, q) - if err != nil { - return nil, fmt.Errorf("failed to retrieve processor and processor topology information: %s", err) - } - if len(serviceProps.Properties) != 1 { - return nil, errors.New("wrong number of service properties present") - } - processorTopology := &hcsschema.ProcessorTopology{} - if err := json.Unmarshal(serviceProps.Properties[0], processorTopology); err != nil { - return nil, fmt.Errorf("failed to unmarshal host processor topology: %s", err) - } - return processorTopology, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/processor_count.go b/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/processor_count.go deleted file mode 100644 index 848df8248e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/processorinfo/processor_count.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build windows - -package processorinfo - -import ( - "runtime" - - "github.com/Microsoft/hcsshim/internal/winapi" -) - -// ProcessorCount calls the win32 API function GetMaximumProcessorCount -// to get the total number of logical processors on the system. If this -// fails it will fall back to runtime.NumCPU -func ProcessorCount() int32 { - if amount := winapi.GetActiveProcessorCount(winapi.ALL_PROCESSOR_GROUPS); amount != 0 { - return int32(amount) - } - return int32(runtime.NumCPU()) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go deleted file mode 100644 index d8d0c20b10..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go +++ /dev/null @@ -1,56 +0,0 @@ -package guestrequest - -// These are constants for v2 schema modify requests. - -type RequestType string -type ResourceType string - -// RequestType const -const ( - RequestTypeAdd RequestType = "Add" - RequestTypeRemove RequestType = "Remove" - RequestTypePreAdd RequestType = "PreAdd" // For networking - RequestTypeUpdate RequestType = "Update" -) - -type SignalValueWCOW string - -const ( - SignalValueWCOWCtrlC SignalValueWCOW = "CtrlC" - SignalValueWCOWCtrlBreak SignalValueWCOW = "CtrlBreak" - SignalValueWCOWCtrlClose SignalValueWCOW = "CtrlClose" - SignalValueWCOWCtrlLogOff SignalValueWCOW = "CtrlLogOff" - SignalValueWCOWCtrlShutdown SignalValueWCOW = "CtrlShutdown" -) - -// ModificationRequest is for modify commands passed to the guest. -type ModificationRequest struct { - RequestType RequestType `json:"RequestType,omitempty"` - ResourceType ResourceType `json:"ResourceType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type NetworkModifyRequest struct { - AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck - RequestType RequestType `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type RS4NetworkModifyRequest struct { - AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck - RequestType RequestType `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -var ( - // V5 GUIDs for SCSI controllers - // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" - // and index as names. 
For example, first GUID is created like this: - // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) - ScsiControllerGuids = []string{ - "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", - "0110f83b-de10-5172-a266-78bca56bf50a", - "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", - "305891a9-b251-5dfe-91a2-c25d9212275b", - } -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestresource/resources.go b/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestresource/resources.go deleted file mode 100644 index 55ae09730c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestresource/resources.go +++ /dev/null @@ -1,160 +0,0 @@ -package guestresource - -import ( - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/opencontainers/runtime-spec/specs-go" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// Arguably, many of these (at least CombinedLayers) should have been generated -// by swagger. -// -// This will also change package name due to an inbound breaking change. - -const ( - // These are constants for v2 schema modify guest requests. - // ResourceTypeMappedDirectory is the modify resource type for mapped - // directories - ResourceTypeMappedDirectory guestrequest.ResourceType = "MappedDirectory" - // ResourceTypeMappedVirtualDisk is the modify resource type for mapped - // virtual disks - ResourceTypeMappedVirtualDisk guestrequest.ResourceType = "MappedVirtualDisk" - // ResourceTypeNetwork is the modify resource type for the `NetworkAdapterV2` - // device. - ResourceTypeNetwork guestrequest.ResourceType = "Network" - ResourceTypeNetworkNamespace guestrequest.ResourceType = "NetworkNamespace" - // ResourceTypeCombinedLayers is the modify resource type for combined - // layers - ResourceTypeCombinedLayers guestrequest.ResourceType = "CombinedLayers" - // ResourceTypeVPMemDevice is the modify resource type for VPMem devices - ResourceTypeVPMemDevice guestrequest.ResourceType = "VPMemDevice" - // ResourceTypeVPCIDevice is the modify resource type for vpci devices - ResourceTypeVPCIDevice guestrequest.ResourceType = "VPCIDevice" - // ResourceTypeContainerConstraints is the modify resource type for updating - // container constraints - ResourceTypeContainerConstraints guestrequest.ResourceType = "ContainerConstraints" - ResourceTypeHvSocket guestrequest.ResourceType = "HvSocket" - // ResourceTypeSecurityPolicy is the modify resource type for updating the security - // policy - ResourceTypeSecurityPolicy guestrequest.ResourceType = "SecurityPolicy" -) - -// This class is used by a modify request to add or remove a combined layers -// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath -// using the specified layers as the parent content. Ignores property ScratchPath -// since the container path is already the scratch path. For linux, the GCS unions -// the specified layers and ScratchPath together, placing the resulting union -// filesystem at ContainerRootPath. 
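The doc comment above explains how the guest applies combined layers; the sketch below shows, as an assumption about the usual v2 modify-request pattern rather than anything this file states, how an LCOWCombinedLayers settings object could be wrapped in a guestrequest.ModificationRequest. All paths, IDs, and the helper name are placeholders.

import (
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
	"github.com/Microsoft/hcsshim/internal/protocol/guestresource"
)

// combinedLayersRequest builds a hypothetical modify request asking the guest
// to union the given layers under the container root. Field names come from
// the types defined below; the concrete values are illustrative only.
func combinedLayersRequest(containerID string, layers []hcsschema.Layer) guestrequest.ModificationRequest {
	return guestrequest.ModificationRequest{
		RequestType:  guestrequest.RequestTypeAdd,
		ResourceType: guestresource.ResourceTypeCombinedLayers,
		Settings: guestresource.LCOWCombinedLayers{
			ContainerID:       containerID,
			ContainerRootPath: "/run/gcs/c/" + containerID + "/rootfs",  // placeholder
			Layers:            layers,
			ScratchPath:       "/run/gcs/c/" + containerID + "/scratch", // placeholder
		},
	}
}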
-type LCOWCombinedLayers struct { - ContainerID string `json:",omitempty"` - ContainerRootPath string `json:",omitempty"` - Layers []hcsschema.Layer `json:",omitempty"` - ScratchPath string `json:",omitempty"` -} - -type WCOWCombinedLayers struct { - ContainerRootPath string `json:"ContainerRootPath,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` - ScratchPath string `json:"ScratchPath,omitempty"` -} - -// Defines the schema for hosted settings passed to GCS and/or OpenGCS - -// LCOWMappedVirtualDisk represents a disk on the host which is mapped into a -// directory in the guest in the V2 schema. -type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` - Encrypted bool `json:"Encrypted,omitempty"` - Options []string `json:"Options,omitempty"` - VerityInfo *DeviceVerityInfo `json:"VerityInfo,omitempty"` -} - -type WCOWMappedVirtualDisk struct { - ContainerPath string `json:"ContainerPath,omitempty"` - Lun int32 `json:"Lun,omitempty"` -} - -// LCOWMappedDirectory represents a directory on the host which is mapped to a -// directory on the guest through Plan9 in the V2 schema. -type LCOWMappedDirectory struct { - MountPath string `json:"MountPath,omitempty"` - Port int32 `json:"Port,omitempty"` - ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported) - ReadOnly bool `json:"ReadOnly,omitempty"` -} - -// LCOWVPMemMappingInfo is one of potentially multiple read-only layers mapped on a VPMem device -type LCOWVPMemMappingInfo struct { - DeviceOffsetInBytes uint64 `json:"DeviceOffsetInBytes,omitempty"` - DeviceSizeInBytes uint64 `json:"DeviceSizeInBytes,omitempty"` -} - -// DeviceVerityInfo represents dm-verity metadata of a block device. -// Most of the fields can be directly mapped to table entries https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html -type DeviceVerityInfo struct { - // Ext4SizeInBytes is the size of ext4 file system - Ext4SizeInBytes int64 `json:",omitempty"` - // Version is the on-disk hash format - Version int `json:",omitempty"` - // Algorithm is the algo used to produce the hashes for dm-verity hash tree - Algorithm string `json:",omitempty"` - // SuperBlock is set to true if dm-verity super block is present on the device - SuperBlock bool `json:",omitempty"` - // RootDigest is the root hash of the dm-verity hash tree - RootDigest string `json:",omitempty"` - // Salt is the salt used to compute the root hash - Salt string `json:",omitempty"` - // BlockSize is the data device block size - BlockSize int `json:",omitempty"` -} - -// Read-only layers over VPMem -type LCOWMappedVPMemDevice struct { - DeviceNumber uint32 `json:"DeviceNumber,omitempty"` - MountPath string `json:"MountPath,omitempty"` - // MappingInfo is used when multiple devices are mapped onto a single VPMem device - MappingInfo *LCOWVPMemMappingInfo `json:"MappingInfo,omitempty"` - // VerityInfo is used when the VPMem has read-only integrity protection enabled - VerityInfo *DeviceVerityInfo `json:"VerityInfo,omitempty"` -} - -type LCOWMappedVPCIDevice struct { - VMBusGUID string `json:"VMBusGUID,omitempty"` -} - -// LCOWNetworkAdapter represents a network interface and its associated -// configuration in a namespace. 
-type LCOWNetworkAdapter struct { - NamespaceID string `json:",omitempty"` - ID string `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` - VPCIAssigned bool `json:",omitempty"` -} - -type LCOWContainerConstraints struct { - Windows specs.WindowsResources `json:",omitempty"` - Linux specs.LinuxResources `json:",omitempty"` -} - -// SignalProcessOptionsLCOW is the options passed to LCOW to signal a given -// process. -type SignalProcessOptionsLCOW struct { - Signal int `json:",omitempty"` -} - -// SignalProcessOptionsWCOW is the options passed to WCOW to signal a given -// process. -type SignalProcessOptionsWCOW struct { - Signal guestrequest.SignalValueWCOW `json:",omitempty"` -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/test/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go deleted file mode 100644 index e177c9a629..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go +++ /dev/null @@ -1,111 +0,0 @@ -package queue - -import ( - "errors" - "sync" -) - -var ( - ErrQueueClosed = errors.New("the queue is closed for reading and writing") - ErrQueueEmpty = errors.New("the queue is empty") -) - -// MessageQueue represents a threadsafe message queue to be used to retrieve or -// write messages to. -type MessageQueue struct { - m *sync.RWMutex - c *sync.Cond - messages []interface{} - closed bool -} - -// NewMessageQueue returns a new MessageQueue. -func NewMessageQueue() *MessageQueue { - m := &sync.RWMutex{} - return &MessageQueue{ - m: m, - c: sync.NewCond(m), - messages: []interface{}{}, - } -} - -// Write writes `msg` to the queue. -func (mq *MessageQueue) Write(msg interface{}) error { - mq.m.Lock() - defer mq.m.Unlock() - - if mq.closed { - return ErrQueueClosed - } - mq.messages = append(mq.messages, msg) - // Signal a waiter that there is now a value available in the queue. - mq.c.Signal() - return nil -} - -// Read will read a value from the queue if available, otherwise return an error. -func (mq *MessageQueue) Read() (interface{}, error) { - mq.m.Lock() - defer mq.m.Unlock() - if mq.closed { - return nil, ErrQueueClosed - } - if mq.isEmpty() { - return nil, ErrQueueEmpty - } - val := mq.messages[0] - mq.messages[0] = nil - mq.messages = mq.messages[1:] - return val, nil -} - -// ReadOrWait will read a value from the queue if available, else it will wait for a -// value to become available. This will block forever if nothing gets written or until -// the queue gets closed. -func (mq *MessageQueue) ReadOrWait() (interface{}, error) { - mq.m.Lock() - if mq.closed { - mq.m.Unlock() - return nil, ErrQueueClosed - } - if mq.isEmpty() { - for !mq.closed && mq.isEmpty() { - mq.c.Wait() - } - mq.m.Unlock() - return mq.Read() - } - val := mq.messages[0] - mq.messages[0] = nil - mq.messages = mq.messages[1:] - mq.m.Unlock() - return val, nil -} - -// IsEmpty returns if the queue is empty -func (mq *MessageQueue) IsEmpty() bool { - mq.m.RLock() - defer mq.m.RUnlock() - return len(mq.messages) == 0 -} - -// Nonexported empty check that doesn't lock so we can call this in Read and Write. -func (mq *MessageQueue) isEmpty() bool { - return len(mq.messages) == 0 -} - -// Close closes the queue for future writes or reads. 
Any attempts to read or write from the -// queue after close will return ErrQueueClosed. This is safe to call multiple times. -func (mq *MessageQueue) Close() { - mq.m.Lock() - defer mq.m.Unlock() - // Already closed - if mq.closed { - return - } - mq.messages = nil - mq.closed = true - // If there's anybody currently waiting on a value from ReadOrWait, we need to - // broadcast so the read(s) can return ErrQueueClosed. - mq.c.Broadcast() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go deleted file mode 100644 index 51bcdf6e98..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go +++ /dev/null @@ -1 +0,0 @@ -package regstate diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go deleted file mode 100644 index 184975add8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go +++ /dev/null @@ -1,290 +0,0 @@ -//go:build windows - -package regstate - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "path/filepath" - "reflect" - "syscall" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go - -//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW - -const ( - _REG_OPTION_VOLATILE = 1 - - _REG_OPENED_EXISTING_KEY = 2 -) - -type Key struct { - registry.Key - Name string -} - -var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} -var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} - -var rootPath = `SOFTWARE\Microsoft\runhcs` - -type NotFoundError struct { - ID string -} - -func (err *NotFoundError) Error() string { - return fmt.Sprintf("ID '%s' was not found", err.ID) -} - -func IsNotFoundError(err error) bool { - _, ok := err.(*NotFoundError) - return ok -} - -type NoStateError struct { - ID string - Key string -} - -func (err *NoStateError) Error() string { - return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID) -} - -func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { - var ( - h syscall.Handle - d uint32 - ) - fullpath := filepath.Join(k.Name, path) - pathPtr, _ := windows.UTF16PtrFromString(path) - err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) - if err != nil { - return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} - } - return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil -} - -func hive(perUser bool) *Key { - r := localMachine - if perUser { - r = localUser - } - return r -} - -func Open(root string, perUser bool) (*Key, error) { - k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) - if err != nil { - return nil, err - } - defer k.Close() - - k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) - if err != nil { - return nil, err - } - return k2, nil -} - -func RemoveAll(root string, perUser bool) error { - k, err := hive(perUser).open(rootPath) - if err != nil { - return err - } - defer k.Close() - r, err := k.open(url.PathEscape(root)) - if err != nil 
{ - return err - } - defer r.Close() - ids, err := r.Enumerate() - if err != nil { - return err - } - for _, id := range ids { - err = r.Remove(id) - if err != nil { - return err - } - } - r.Close() - return k.Remove(root) -} - -func (k *Key) Close() error { - err := k.Key.Close() - k.Key = 0 - return err -} - -func (k *Key) Enumerate() ([]string, error) { - escapedIDs, err := k.ReadSubKeyNames(0) - if err != nil { - return nil, err - } - var ids []string - for _, e := range escapedIDs { - id, err := url.PathUnescape(e) - if err == nil { - ids = append(ids, id) - } - } - return ids, nil -} - -func (k *Key) open(name string) (*Key, error) { - fullpath := filepath.Join(k.Name, name) - nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS) - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return &Key{nk, fullpath}, nil -} - -func (k *Key) openid(id string) (*Key, error) { - escaped := url.PathEscape(id) - fullpath := filepath.Join(k.Name, escaped) - nk, err := k.open(escaped) - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { - return nil, &NotFoundError{id} - } - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return nk, nil -} - -func (k *Key) Remove(id string) error { - escaped := url.PathEscape(id) - err := registry.DeleteKey(k.Key, escaped) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NotFoundError{id} - } - return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} - } - return nil -} - -func (k *Key) set(id string, create bool, key string, state interface{}) error { - var sk *Key - var err error - if create { - var existing bool - eid := url.PathEscape(id) - sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) - if err != nil { - return err - } - defer sk.Close() - if existing { - sk.Close() - return fmt.Errorf("container %s already exists", id) - } - } else { - sk, err = k.openid(id) - if err != nil { - return err - } - defer sk.Close() - } - switch reflect.TypeOf(state).Kind() { - case reflect.Bool: - v := uint32(0) - if state.(bool) { - v = 1 - } - err = sk.SetDWordValue(key, v) - case reflect.Int: - err = sk.SetQWordValue(key, uint64(state.(int))) - case reflect.String: - err = sk.SetStringValue(key, state.(string)) - default: - var js []byte - js, err = json.Marshal(state) - if err != nil { - return err - } - err = sk.SetBinaryValue(key, js) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Create(id, key string, state interface{}) error { - return k.set(id, true, key, state) -} - -func (k *Key) Set(id, key string, state interface{}) error { - return k.set(id, false, key, state) -} - -func (k *Key) Clear(id, key string) error { - sk, err := k.openid(id) - if err != nil { - return err - } - defer sk.Close() - err = sk.DeleteValue(key) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Get(id, key string, state interface{}) error { - sk, err := k.openid(id) - if err != nil { - return err - } - defer sk.Close() - - var js []byte - switch reflect.TypeOf(state).Elem().Kind() { - case reflect.Bool: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if 
err == nil { - *state.(*bool) = v != 0 - } - case reflect.Int: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if err == nil { - *state.(*int) = int(v) - } - case reflect.String: - var v string - v, _, err = sk.GetStringValue(key) - if err == nil { - *state.(*string) = string(v) - } - default: - js, _, err = sk.GetBinaryValue(key) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} - } - if js != nil { - err = json.Unmarshal(js, state) - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/resources/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/resources/doc.go deleted file mode 100644 index 878cd99d0c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/resources/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package resources handles creating, updating, and releasing resources -// on a container -package resources diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/resources/resources.go b/test/vendor/github.com/Microsoft/hcsshim/internal/resources/resources.go deleted file mode 100644 index 319c88461f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/resources/resources.go +++ /dev/null @@ -1,167 +0,0 @@ -//go:build windows - -package resources - -import ( - "context" - "errors" - - "github.com/Microsoft/hcsshim/internal/credentials" - "github.com/Microsoft/hcsshim/internal/layers" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/uvm" -) - -// NetNS returns the network namespace for the container -func (r *Resources) NetNS() string { - return r.netNS -} - -// SetNetNS updates the container resource's NetNS -func (r *Resources) SetNetNS(netNS string) { - r.netNS = netNS -} - -// SetCreatedNetNS updates the container resource's CreatedNetNS value -func (r *Resources) SetCreatedNetNS(created bool) { - r.createdNetNS = true -} - -// ContainerRootInUVM returns the containerRootInUVM for the container -func (r *Resources) ContainerRootInUVM() string { - return r.containerRootInUVM -} - -// SetContainerRootInUVM updates the container resource's containerRootInUVM value -func (r *Resources) SetContainerRootInUVM(containerRootInUVM string) { - r.containerRootInUVM = containerRootInUVM -} - -// SetAddedNetNSToVM updates the container resource's AddedNetNSToVM value -func (r *Resources) SetAddedNetNSToVM(addedNetNSToVM bool) { - r.addedNetNSToVM = addedNetNSToVM -} - -func (r *Resources) SetLcowScratchPath(scratchPath string) { - r.lcowScratchPath = scratchPath -} - -func (r *Resources) LcowScratchPath() string { - return r.lcowScratchPath -} - -// SetLayers updates the container resource's image layers -func (r *Resources) SetLayers(l *layers.ImageLayers) { - r.layers = l -} - -// Add adds one or more resource closers to the resources struct to be -// tracked for release later on -func (r *Resources) Add(newResources ...ResourceCloser) { - r.resources = append(r.resources, newResources...) -} - -// Resources is the structure returned as part of creating a container. It holds -// nothing useful to clients, hence everything is lowercased. A client would use -// it in a call to ReleaseResources to ensure everything is cleaned up when a -// container exits. -type Resources struct { - id string - // containerRootInUVM is the base path in a utility VM where elements relating - // to a container are exposed. 
For example, the mounted filesystem; the runtime - // spec (in the case of LCOW); overlay and scratch (in the case of LCOW). - // - // For WCOW, this will be under wcowRootInUVM. For LCOW, this will be under - // lcowRootInUVM, this will also be the "OCI Bundle Path". - containerRootInUVM string - // lcowScratchPath represents the path inside the UVM at which the LCOW scratch - // directory is present. Usually, this is the path at which the container scratch - // VHD is mounted inside the UVM (`containerRootInUVM`). But in case of scratch - // sharing this is a directory under the UVM scratch directory. - lcowScratchPath string - - netNS string - // createNetNS indicates if the network namespace has been created - createdNetNS bool - // addedNetNSToVM indicates if the network namespace has been added to the containers utility VM - addedNetNSToVM bool - // layers is a pointer to a struct of the layers paths of a container - layers *layers.ImageLayers - // resources is a slice of the resources associated with a container - resources []ResourceCloser -} - -// ResourceCloser is a generic interface for the releasing of a resource. If a resource implements -// this interface(which they all should), freeing of that resource should entail one call to -// .Release(ctx) -type ResourceCloser interface { - Release(context.Context) error -} - -// NewContainerResources returns a new empty container Resources struct with the -// given container id -func NewContainerResources(id string) *Resources { - return &Resources{ - id: id, - } -} - -// ReleaseResources releases/frees all of the resources associated with a container. This includes -// Plan9 shares, vsmb mounts, pipe mounts, network endpoints, scsi mounts, vpci devices and layers. -// TODO: make method on Resources struct. -func ReleaseResources(ctx context.Context, r *Resources, vm *uvm.UtilityVM, all bool) error { - if vm != nil { - if r.addedNetNSToVM { - if err := vm.TearDownNetworking(ctx, r.netNS); err != nil { - log.G(ctx).Warn(err) - } - r.addedNetNSToVM = false - } - } - - releaseErr := false - // Release resources in reverse order so that the most recently - // added are cleaned up first. We don't return an error right away - // so that other resources still get cleaned up in the case of one - // or more failing. - for i := len(r.resources) - 1; i >= 0; i-- { - switch r.resources[i].(type) { - case *uvm.NetworkEndpoints: - if r.createdNetNS { - if err := r.resources[i].Release(ctx); err != nil { - log.G(ctx).WithError(err).Error("failed to release container resource") - releaseErr = true - } - r.createdNetNS = false - } - case *credentials.CCGResource: - if err := r.resources[i].Release(ctx); err != nil { - log.G(ctx).WithError(err).Error("failed to release container resource") - releaseErr = true - } - default: - // Don't need to check if vm != nil here anymore as they wouldnt - // have been added in the first place. All resources have embedded - // vm they belong to. - if all { - if err := r.resources[i].Release(ctx); err != nil { - log.G(ctx).WithError(err).Error("failed to release container resource") - releaseErr = true - } - } - } - } - r.resources = nil - if releaseErr { - return errors.New("failed to release one or more container resources") - } - - if r.layers != nil { - // TODO dcantah: Either make it so layers doesn't rely on the all bool for cleanup logic - // or find a way to factor out the all bool in favor of something else. 
- if err := r.layers.Release(ctx, all); err != nil { - return err - } - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go deleted file mode 100644 index 33c43e6c59..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "syscall" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -// ContainerState represents the platform agnostic pieces relating to a -// running container's status and state -type ContainerState struct { - // Version is the OCI version for the container - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Status is the current status of the container, running, paused, ... - Status string `json:"status"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundle"` - // Rootfs is a path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfs"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - // Annotations is the user defined annotations added to the config. - Annotations map[string]string `json:"annotations,omitempty"` - // The owner of the state directory (the owner of the container). - Owner string `json:"owner"` -} - -// GetErrorFromPipe returns reads from `pipe` and verifies if the operation -// returned success or error. If error converts that to an error and returns. If -// `p` is not nill will issue a `Kill` and `Wait` for exit. -func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { - serr, err := ioutil.ReadAll(pipe) - if err != nil { - return err - } - - if bytes.Equal(serr, ShimSuccess) { - return nil - } - - extra := "" - if p != nil { - _ = p.Kill() - state, err := p.Wait() - if err != nil { - panic(err) - } - extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) - } - if len(serr) == 0 { - return fmt.Errorf("unknown shim failure%s", extra) - } - - return errors.New(string(serr)) -} - -// VMPipePath returns the named pipe path for the vm shim. -func VMPipePath(hostUniqueID guid.GUID) string { - return SafePipePath("runhcs-vm-" + hostUniqueID.String()) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go deleted file mode 100644 index b3e443d600..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio" -) - -// VMRequestOp is an operation that can be issued to a VM shim. -type VMRequestOp string - -const ( - // OpCreateContainer is a create container request. - OpCreateContainer VMRequestOp = "create" - // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. - OpSyncNamespace VMRequestOp = "sync" - // OpUnmountContainer is a container unmount request. - OpUnmountContainer VMRequestOp = "unmount" - // OpUnmountContainerDiskOnly is a container unmount disk request. - OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" -) - -// VMRequest is an operation request that is issued to a VM shim. 
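A hypothetical caller sketch for the request type defined just below, combining it with VMPipePath (above) and IssueVMRequest (further down in this package); the container ID, host unique ID, and helper name are placeholders.

import (
	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/runhcs"
)

// requestCreateContainer asks the VM shim listening on its named pipe to
// create a container. Pipe naming and request encoding are handled by the
// helpers in this package; only the wiring shown here is illustrative.
func requestCreateContainer(hostUniqueID guid.GUID, containerID string) error {
	req := &runhcs.VMRequest{
		ID: containerID,
		Op: runhcs.OpCreateContainer,
	}
	return runhcs.IssueVMRequest(runhcs.VMPipePath(hostUniqueID), req)
}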
-type VMRequest struct { - ID string - Op VMRequestOp -} - -// IssueVMRequest issues a request to a shim at the given pipe. -func IssueVMRequest(pipepath string, req *VMRequest) error { - pipe, err := winio.DialPipe(pipepath, nil) - if err != nil { - return err - } - defer pipe.Close() - if err := json.NewEncoder(pipe).Encode(req); err != nil { - return err - } - if err := GetErrorFromPipe(pipe, nil); err != nil { - return err - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go b/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go deleted file mode 100644 index f211d25e72..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go +++ /dev/null @@ -1 +0,0 @@ -package safefile diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go deleted file mode 100644 index e824bb7788..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ /dev/null @@ -1,404 +0,0 @@ -//go:build windows - -package safefile - -import ( - "errors" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "unicode/utf16" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/winapi" - - winio "github.com/Microsoft/go-winio" -) - -func OpenRoot(path string) (*os.File, error) { - longpath, err := longpath.LongAbs(path) - if err != nil { - return nil, err - } - return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) -} - -func cleanGoStringRelativePath(path string) (string, error) { - path = filepath.Clean(path) - if strings.Contains(path, ":") { - // Since alternate data streams must follow the file they - // are attached to, finding one here (out of order) is invalid. - return "", errors.New("path contains invalid character `:`") - } - fspath := filepath.FromSlash(path) - if len(fspath) > 0 && fspath[0] == '\\' { - return "", errors.New("expected relative path") - } - return fspath, nil -} - -func ntRelativePath(path string) ([]uint16, error) { - fspath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - path16 := utf16.Encode(([]rune)(fspath)) - if len(path16) > 32767 { - return nil, syscall.ENAMETOOLONG - } - - return path16, nil -} - -// openRelativeInternal opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. 
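Before the implementation below, a hedged sketch of how the exported helpers in this package compose: open the root once, then perform every path operation relative to that handle so reparse points cannot redirect them. The directory names and helper name are placeholders.

import (
	"github.com/Microsoft/hcsshim/internal/safefile"
)

// makeAndRemoveScratch is a hypothetical helper that creates and then removes
// a directory tree strictly relative to root, using only the reparse-safe
// helpers from this package.
func makeAndRemoveScratch(rootPath string) error {
	root, err := safefile.OpenRoot(rootPath)
	if err != nil {
		return err
	}
	defer root.Close()

	// Create nested directories relative to the root handle.
	if err := safefile.MkdirAllRelative(`tmp\scratch`, root); err != nil {
		return err
	}
	// ... use the directory ...

	// Remove the tree, again relative to the root handle.
	return safefile.RemoveAllRelative(`tmp\scratch`, root)
}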
-func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - var ( - h uintptr - iosb winapi.IOStatusBlock - oa winapi.ObjectAttributes - ) - - cleanRelativePath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - if root == nil || root.Fd() == 0 { - return nil, errors.New("missing root directory") - } - - pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) - if err != nil { - return nil, err - } - - oa.Length = unsafe.Sizeof(oa) - oa.ObjectName = pathUnicode - oa.RootDirectory = uintptr(root.Fd()) - oa.Attributes = winapi.OBJ_DONT_REPARSE - status := winapi.NtCreateFile( - &h, - accessMask|syscall.SYNCHRONIZE, - &oa, - &iosb, - nil, - 0, - shareFlags, - createDisposition, - winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, - nil, - 0, - ) - if status != 0 { - return nil, winapi.RtlNtStatusToDosError(status) - } - - fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) - if err != nil { - syscall.Close(syscall.Handle(h)) - return nil, err - } - - return os.NewFile(h, fullPath), nil -} - -// OpenRelative opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. -func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) - if err != nil { - err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} - } - return f, err -} - -// LinkRelative creates a hard link from oldname to newname (relative to oldroot -// and newroot), failing if any of the intermediate path components are reparse -// points. -func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { - // Open the old file. - oldf, err := openRelativeInternal( - oldname, - oldroot, - syscall.FILE_WRITE_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0, - ) - if err != nil { - return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer oldf.Close() - - // Open the parent of the new file. - var parent *os.File - parentPath := filepath.Dir(newname) - if parentPath != "." { - parent, err = openRelativeInternal( - parentPath, - newroot, - syscall.GENERIC_READ, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_DIRECTORY_FILE) - if err != nil { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer parent.Close() - - fi, err := winio.GetFileBasicInfo(parent) - if err != nil { - return err - } - if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} - } - } else { - parent = newroot - } - - // Issue an NT call to create the link. This will be safe because NT will - // not open any more directories to create the link, so it cannot walk any - // more reparse points. 
- newbase := filepath.Base(newname) - newbase16, err := ntRelativePath(newbase) - if err != nil { - return err - } - - size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 - linkinfoBuffer := winapi.LocalAlloc(0, size) - defer winapi.LocalFree(linkinfoBuffer) - - linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) - linkinfo.RootDirectory = parent.Fd() - linkinfo.FileNameLength = uint32(len(newbase16) * 2) - copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) - - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - oldf.Fd(), - &iosb, - linkinfoBuffer, - uint32(size), - winapi.FileLinkInformationClass, - ) - if status != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} - } - - return nil -} - -// deleteOnClose marks a file to be deleted when the handle is closed. -func deleteOnClose(f *os.File) error { - disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - f.Fd(), - &iosb, - uintptr(unsafe.Pointer(&disposition)), - uint32(unsafe.Sizeof(disposition)), - winapi.FileDispositionInformationExClass, - ) - if status != 0 { - return winapi.RtlNtStatusToDosError(status) - } - return nil -} - -// clearReadOnly clears the readonly attribute on a file. -func clearReadOnly(f *os.File) error { - bi, err := winio.GetFileBasicInfo(f) - if err != nil { - return err - } - if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { - return nil - } - sbi := winio.FileBasicInfo{ - FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, - } - if sbi.FileAttributes == 0 { - sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL - } - return winio.SetFileBasicInfo(f, &sbi) -} - -// RemoveRelative removes a file or directory relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err == nil { - defer f.Close() - err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { - // Maybe the file is marked readonly. Clear the bit and retry. - _ = clearReadOnly(f) - err = deleteOnClose(f) - } - } - if err != nil { - return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} - } - return nil -} - -// RemoveAllRelative removes a directory tree relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveAllRelative(path string, root *os.File) error { - fi, err := LstatRelative(path, root) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { - // If this is a reparse point, it can't have children. Simple remove will do. - err := RemoveRelative(path, root) - if err == nil || os.IsNotExist(err) { - return nil - } - return err - } - - // It is necessary to use os.Open as Readdirnames does not work with - // OpenRelative. 
This is safe because the above lstatrelative fails - // if the target is outside the root, and we know this is not a - // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. - fd, err := os.Open(filepath.Join(root.Name(), path)) - if err != nil { - if os.IsNotExist(err) { - // Race. It was deleted between the Lstat and Open. - // Return nil per RemoveAll's docs. - return nil - } - return err - } - - // Remove contents & return first error. - for { - names, err1 := fd.Readdirnames(100) - for _, name := range names { - err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) - if err == nil { - err = err1 - } - } - if err1 == io.EOF { - break - } - // If Readdirnames returned an error, use it. - if err == nil { - err = err1 - } - if len(names) == 0 { - break - } - } - fd.Close() - - // Remove directory. - err1 := RemoveRelative(path, root) - if err1 == nil || os.IsNotExist(err1) { - return nil - } - if err == nil { - err = err1 - } - return err -} - -// MkdirRelative creates a directory relative to a root, failing if any -// intermediate path components are reparse points. -func MkdirRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_CREATE, - winapi.FILE_DIRECTORY_FILE) - if err == nil { - f.Close() - } else { - err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} - } - return err -} - -// MkdirAllRelative creates each directory in the path relative to a root, failing if -// any existing intermediate path components are reparse points. -func MkdirAllRelative(path string, root *os.File) error { - pathParts := strings.Split(filepath.Clean(path), (string)(filepath.Separator)) - for index := range pathParts { - - partialPath := filepath.Join(pathParts[0 : index+1]...) - stat, err := LstatRelative(partialPath, root) - - if err != nil { - if os.IsNotExist(err) { - if err := MkdirRelative(partialPath, root); err != nil { - return err - } - continue - } - return err - } - - if !stat.IsDir() { - fullPath := filepath.Join(root.Name(), partialPath) - return &os.PathError{Op: "mkdir", Path: fullPath, Err: syscall.ENOTDIR} - } - } - - return nil -} - -// LstatRelative performs a stat operation on a file relative to a root, failing -// if any intermediate path components are reparse points. -func LstatRelative(path string, root *os.File) (os.FileInfo, error) { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} - } - defer f.Close() - return f.Stat() -} - -// EnsureNotReparsePointRelative validates that a given file (relative to a -// root) and all intermediate path components are not a reparse points. -func EnsureNotReparsePointRelative(path string, root *os.File) error { - // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. 
- f, err := OpenRelative( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0) - if err != nil { - return err - } - f.Close() - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/doc.go deleted file mode 100644 index c1432114b1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/doc.go +++ /dev/null @@ -1 +0,0 @@ -package schemaversion diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go b/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go deleted file mode 100644 index bb2fa62375..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build windows -// +build windows - -package schemaversion - -import ( - "encoding/json" - "fmt" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/osversion" - "github.com/sirupsen/logrus" -) - -// SchemaV10 makes it easy for callers to get a v1.0 schema version object -func SchemaV10() *hcsschema.Version { - return &hcsschema.Version{Major: 1, Minor: 0} -} - -// SchemaV21 makes it easy for callers to get a v2.1 schema version object -func SchemaV21() *hcsschema.Version { - return &hcsschema.Version{Major: 2, Minor: 1} -} - -// SchemaV25 makes it easy for callers to get a v2.5 schema version object. -func SchemaV25() *hcsschema.Version { - return &hcsschema.Version{Major: 2, Minor: 5} -} - -// isSupported determines if a given schema version is supported -func IsSupported(sv *hcsschema.Version) error { - if IsV10(sv) { - return nil - } - if IsV21(sv) { - if osversion.Build() < osversion.RS5 { - return fmt.Errorf("unsupported on this Windows build") - } - return nil - } - - if IsV25(sv) { - if osversion.Build() < 20348 { // pending solution to quuestion over version numbers re osversion.V21H2 - return fmt.Errorf("unsupported on this Windows build") - } - return nil - } - return fmt.Errorf("unknown schema version %s", String(sv)) -} - -// IsV10 determines if a given schema version object is 1.0. This was the only thing -// supported in RS1..3. It lives on in RS5, but will be deprecated in a future release. -func IsV10(sv *hcsschema.Version) bool { - if sv.Major == 1 && sv.Minor == 0 { - return true - } - return false -} - -// IsV21 determines if a given schema version object is 2.0. This was introduced in -// RS4, but not fully implemented. Recommended for applications using HCS in RS5 -// onwards. -func IsV21(sv *hcsschema.Version) bool { - if sv.Major == 2 && sv.Minor == 1 { - return true - } - return false -} - -// V25 schema introduced much later. Required to support SNP. -func IsV25(sv *hcsschema.Version) bool { - if sv.Major == 2 && sv.Minor == 5 { - return true - } - return false -} - -// String returns a JSON encoding of a schema version object -func String(sv *hcsschema.Version) string { - b, err := json.Marshal(sv) - if err != nil { - return "" - } - return string(b[:]) -} - -// DetermineSchemaVersion works out what schema version to use based on build and -// requested option. 
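A small sketch combining the helpers above: validate an explicitly requested schema version, then let DetermineSchemaVersion (defined below) pick the effective one for the current build. The wrapper name is hypothetical.

import (
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/schemaversion"
)

// chooseSchema rejects an unsupported requested version up front and
// otherwise defers to DetermineSchemaVersion for the build-based default.
func chooseSchema(requested *hcsschema.Version) (*hcsschema.Version, error) {
	if requested != nil {
		if err := schemaversion.IsSupported(requested); err != nil {
			return nil, fmt.Errorf("requested schema %s: %w",
				schemaversion.String(requested), err)
		}
	}
	return schemaversion.DetermineSchemaVersion(requested), nil
}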
-func DetermineSchemaVersion(requestedSV *hcsschema.Version) *hcsschema.Version { - sv := SchemaV10() - if osversion.Build() >= osversion.RS5 { - sv = SchemaV21() - } - if requestedSV != nil { - if err := IsSupported(requestedSV); err == nil { - sv = requestedSV - } else { - logrus.WithField("schemaVersion", requestedSV).Warn("Ignoring unsupported requested schema version") - } - } - return sv -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/test/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go deleted file mode 100644 index bfcc157699..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build windows -// +build windows - -package security - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -type ( - accessMask uint32 - accessMode uint32 - desiredAccess uint32 - inheritMode uint32 - objectType uint32 - shareMode uint32 - securityInformation uint32 - trusteeForm uint32 - trusteeType uint32 -) - -type explicitAccess struct { - //nolint:structcheck - accessPermissions accessMask - //nolint:structcheck - accessMode accessMode - //nolint:structcheck - inheritance inheritMode - //nolint:structcheck - trustee trustee -} - -type trustee struct { - //nolint:unused,structcheck - multipleTrustee *trustee - //nolint:unused,structcheck - multipleTrusteeOperation int32 - trusteeForm trusteeForm - trusteeType trusteeType - name uintptr -} - -const ( - AccessMaskNone accessMask = 0 - AccessMaskRead accessMask = 1 << 31 // GENERIC_READ - AccessMaskWrite accessMask = 1 << 30 // GENERIC_WRITE - AccessMaskExecute accessMask = 1 << 29 // GENERIC_EXECUTE - AccessMaskAll accessMask = 1 << 28 // GENERIC_ALL - - accessMaskDesiredPermission = AccessMaskRead - - accessModeGrant accessMode = 1 - - desiredAccessReadControl desiredAccess = 0x20000 - desiredAccessWriteDac desiredAccess = 0x40000 - - gvmga = "GrantVmGroupAccess:" - - inheritModeNoInheritance inheritMode = 0x0 - inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 - - objectTypeFileObject objectType = 0x1 - - securityInformationDACL securityInformation = 0x4 - - shareModeRead shareMode = 0x1 - shareModeWrite shareMode = 0x2 - - //nolint:stylecheck // ST1003 - sidVmGroup = "S-1-5-83-0" - - trusteeFormIsSid trusteeForm = 0 - - trusteeTypeWellKnownGroup trusteeType = 5 -) - -// GrantVmGroupAccess sets the DACL for a specified file or directory to -// include Grant ACE entries for the VM Group SID. This is a golang re- -// implementation of the same function in vmcompute, just not exported in -// RS5. Which kind of sucks. Sucks a lot :/ -func GrantVmGroupAccess(name string) error { //nolint:stylecheck // ST1003 - return GrantVmGroupAccessWithMask(name, accessMaskDesiredPermission) -} - -// GrantVmGroupAccessWithMask sets the desired DACL for a specified file or -// directory. -func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:stylecheck // ST1003 - if access == 0 || access<<4 != 0 { - return fmt.Errorf("invalid access mask: 0x%08x", access) - } - // Stat (to determine if `name` is a directory). - s, err := os.Stat(name) - if err != nil { - return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) - } - - // Get a handle to the file/directory. Must defer Close on success. 
- fd, err := createFile(name, s.IsDir()) - if err != nil { - return err // Already wrapped - } - defer func() { - _ = syscall.CloseHandle(fd) - }() - - // Get the current DACL and Security Descriptor. Must defer LocalFree on success. - ot := objectTypeFileObject - si := securityInformationDACL - sd := uintptr(0) - origDACL := uintptr(0) - if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) - } - defer func() { - _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) - }() - - // Generate a new DACL which is the current DACL with the required ACEs added. - // Must defer LocalFree on success. - newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), access, origDACL) - if err != nil { - return err // Already wrapped - } - defer func() { - _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) - }() - - // And finally use SetSecurityInfo to apply the updated DACL. - if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) - } - - return nil -} - -// createFile is a helper function to call [Nt]CreateFile to get a handle to -// the file or directory. -func createFile(name string, isDir bool) (syscall.Handle, error) { - namep, err := syscall.UTF16FromString(name) - if err != nil { - return 0, fmt.Errorf("syscall.UTF16FromString %s: %w", name, err) - } - da := uint32(desiredAccessReadControl | desiredAccessWriteDac) - sm := uint32(shareModeRead | shareModeWrite) - fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) - if isDir { - fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) - } - fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) - if err != nil { - return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) - } - return fd, nil -} - -// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. -// The caller is responsible for LocalFree of the returned DACL on success. 
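A usage sketch for the exported entry points defined above: grant the VM group (SID S-1-5-83-0) access to a host file, for example a VHD, before handing it to a virtual machine. The path, the read/write decision, and the wrapper name are placeholders.

import (
	"github.com/Microsoft/hcsshim/internal/security"
)

// shareFileWithVM grants the VM group the access it needs on a host file.
// GrantVmGroupAccess applies the default (read) mask; the WithMask variant is
// used when the VM must also write to the file.
func shareFileWithVM(path string, readWrite bool) error {
	if readWrite {
		return security.GrantVmGroupAccessWithMask(path,
			security.AccessMaskRead|security.AccessMaskWrite)
	}
	return security.GrantVmGroupAccess(path)
}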
-func generateDACLWithAcesAdded(name string, isDir bool, desiredAccess accessMask, origDACL uintptr) (uintptr, error) { - // Generate pointers to the SIDs based on the string SIDs - sid, err := syscall.StringToSid(sidVmGroup) - if err != nil { - return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) - } - - inheritance := inheritModeNoInheritance - if isDir { - inheritance = inheritModeSubContainersAndObjectsInherit - } - - eaArray := []explicitAccess{ - { - accessPermissions: desiredAccess, - accessMode: accessModeGrant, - inheritance: inheritance, - trustee: trustee{ - trusteeForm: trusteeFormIsSid, - trusteeType: trusteeTypeWellKnownGroup, - name: uintptr(unsafe.Pointer(sid)), - }, - }, - } - - modifiedDACL := uintptr(0) - if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) - } - - return modifiedDACL, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go b/test/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go deleted file mode 100644 index f0cdd7d20c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package security - -//go:generate go run $GOPATH/src/golang.org/x/sys/windows/mkwinsyscall/mkwinsyscall.go -output zsyscall_windows.go syscall_windows.go - -//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo -//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo -//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go b/test/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go deleted file mode 100644 index 4084680e0f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package security - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") -) - -func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.go b/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.go deleted file mode 100644 index 6d37c7b411..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build windows - -package shimdiag - -import ( - fmt "fmt" - "os" - "path/filepath" - "sort" - strings "strings" - - "github.com/Microsoft/go-winio" - "github.com/containerd/ttrpc" - "golang.org/x/sys/windows" -) - -const ( - shimPrefix = `\\.\pipe\ProtectedPrefix\Administrators\containerd-shim-` - shimSuffix = `-pipe` -) - -func findPipes(pattern string) ([]string, error) { - path := `\\.\pipe\*` - path16, err := windows.UTF16FromString(path) - if err != nil { - return nil, err - } - var data windows.Win32finddata - h, err := windows.FindFirstFile(&path16[0], &data) - if err != nil { - return nil, &os.PathError{Op: "FindFirstFile", Path: path, Err: err} - } - var names []string - for { - name := `\\.\pipe\` + windows.UTF16ToString(data.FileName[:]) - if matched, _ := filepath.Match(pattern, name); matched { - names = append(names, name) - } - err = windows.FindNextFile(h, &data) - if err == windows.ERROR_NO_MORE_FILES { - break - } - if err != nil { - return nil, &os.PathError{Op: "FindNextFile", Path: path, Err: err} - } - } - return names, nil -} - -func FindShims(name string) ([]string, error) { - pipes, err := findPipes(shimPrefix + name + "*" + shimSuffix) - if err != nil { - return nil, err - } - for i, p := range pipes { - pipes[i] = p[len(shimPrefix) : len(p)-len(shimSuffix)] - } - sort.Strings(pipes) - return pipes, nil -} - -func findShim(name string) (string, error) { - if strings.ContainsAny(name, "*?\\/") { - return "", fmt.Errorf("invalid shim name %s", name) - } - shims, err := FindShims(name) - if err != nil { - return "", err - } - if len(shims) == 0 { - return "", fmt.Errorf("no such shim 
%s", name) - } - if len(shims) > 1 && shims[0] != name { - return "", fmt.Errorf("multiple shims beginning with %s", name) - } - return shims[0], nil -} - -func GetShim(name string) (*ttrpc.Client, error) { - shim, err := findShim(name) - if err != nil { - return nil, err - } - conn, err := winio.DialPipe(shimPrefix+shim+shimSuffix, nil) - if err != nil { - return nil, err - } - return ttrpc.NewClient(conn), nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.pb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.pb.go deleted file mode 100644 index 84ca8aa334..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.pb.go +++ /dev/null @@ -1,1913 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.proto - -package shimdiag - -import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExecProcessRequest struct { - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Workdir string `protobuf:"bytes,2,opt,name=workdir,proto3" json:"workdir,omitempty"` - Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{0} -} -func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessRequest.Merge(m, src) -} -func (m *ExecProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo - -type ExecProcessResponse struct { - ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessResponse) Reset() { 
*m = ExecProcessResponse{} } -func (*ExecProcessResponse) ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{1} -} -func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessResponse.Merge(m, src) -} -func (m *ExecProcessResponse) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo - -type StacksRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StacksRequest) Reset() { *m = StacksRequest{} } -func (*StacksRequest) ProtoMessage() {} -func (*StacksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{2} -} -func (m *StacksRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StacksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StacksRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StacksRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StacksRequest.Merge(m, src) -} -func (m *StacksRequest) XXX_Size() int { - return m.Size() -} -func (m *StacksRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StacksRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StacksRequest proto.InternalMessageInfo - -type StacksResponse struct { - Stacks string `protobuf:"bytes,1,opt,name=stacks,proto3" json:"stacks,omitempty"` - GuestStacks string `protobuf:"bytes,2,opt,name=guest_stacks,json=guestStacks,proto3" json:"guest_stacks,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StacksResponse) Reset() { *m = StacksResponse{} } -func (*StacksResponse) ProtoMessage() {} -func (*StacksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{3} -} -func (m *StacksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StacksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StacksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StacksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StacksResponse.Merge(m, src) -} -func (m *StacksResponse) XXX_Size() int { - return m.Size() -} -func (m *StacksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StacksResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StacksResponse proto.InternalMessageInfo - -type ShareRequest struct { - HostPath string `protobuf:"bytes,1,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` - UvmPath string `protobuf:"bytes,2,opt,name=uvm_path,json=uvmPath,proto3" 
json:"uvm_path,omitempty"` - ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShareRequest) Reset() { *m = ShareRequest{} } -func (*ShareRequest) ProtoMessage() {} -func (*ShareRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{4} -} -func (m *ShareRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ShareRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ShareRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ShareRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShareRequest.Merge(m, src) -} -func (m *ShareRequest) XXX_Size() int { - return m.Size() -} -func (m *ShareRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShareRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShareRequest proto.InternalMessageInfo - -type ShareResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShareResponse) Reset() { *m = ShareResponse{} } -func (*ShareResponse) ProtoMessage() {} -func (*ShareResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{5} -} -func (m *ShareResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ShareResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ShareResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ShareResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShareResponse.Merge(m, src) -} -func (m *ShareResponse) XXX_Size() int { - return m.Size() -} -func (m *ShareResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ShareResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ShareResponse proto.InternalMessageInfo - -type PidRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidRequest) Reset() { *m = PidRequest{} } -func (*PidRequest) ProtoMessage() {} -func (*PidRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{6} -} -func (m *PidRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidRequest.Merge(m, src) -} -func (m *PidRequest) XXX_Size() int { - return m.Size() -} -func (m *PidRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PidRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PidRequest proto.InternalMessageInfo - -type PidResponse struct { - Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidResponse) Reset() { *m = 
PidResponse{} } -func (*PidResponse) ProtoMessage() {} -func (*PidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c7933dc6ffbb8784, []int{7} -} -func (m *PidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidResponse.Merge(m, src) -} -func (m *PidResponse) XXX_Size() int { - return m.Size() -} -func (m *PidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PidResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExecProcessRequest)(nil), "containerd.runhcs.v1.diag.ExecProcessRequest") - proto.RegisterType((*ExecProcessResponse)(nil), "containerd.runhcs.v1.diag.ExecProcessResponse") - proto.RegisterType((*StacksRequest)(nil), "containerd.runhcs.v1.diag.StacksRequest") - proto.RegisterType((*StacksResponse)(nil), "containerd.runhcs.v1.diag.StacksResponse") - proto.RegisterType((*ShareRequest)(nil), "containerd.runhcs.v1.diag.ShareRequest") - proto.RegisterType((*ShareResponse)(nil), "containerd.runhcs.v1.diag.ShareResponse") - proto.RegisterType((*PidRequest)(nil), "containerd.runhcs.v1.diag.PidRequest") - proto.RegisterType((*PidResponse)(nil), "containerd.runhcs.v1.diag.PidResponse") -} - -func init() { - proto.RegisterFile("github.com/Microsoft/hcsshim/internal/shimdiag/shimdiag.proto", fileDescriptor_c7933dc6ffbb8784) -} - -var fileDescriptor_c7933dc6ffbb8784 = []byte{ - // 534 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0x8e, 0x49, 0x93, 0xda, 0xd3, 0x50, 0xd0, 0x52, 0x21, 0xd7, 0x91, 0x4c, 0xb0, 0x04, 0x98, - 0x03, 0x8e, 0x28, 0x07, 0x0e, 0x88, 0x0b, 0x3f, 0x12, 0x08, 0x21, 0x42, 0x72, 0xa9, 0x38, 0x10, - 0x6d, 0xed, 0xc5, 0x5e, 0x35, 0xde, 0x0d, 0xbb, 0xeb, 0xd0, 0xde, 0x78, 0x0c, 0x1e, 0x80, 0x87, - 0xe9, 0x91, 0x23, 0x47, 0x9a, 0x27, 0x41, 0xbb, 0x5e, 0x07, 0x2a, 0x44, 0xda, 0x9e, 0x32, 0xdf, - 0xb7, 0xf3, 0xcd, 0xec, 0x7c, 0xb3, 0x31, 0x3c, 0xcd, 0xa9, 0x2a, 0xaa, 0x83, 0x24, 0xe5, 0xe5, - 0xf0, 0x2d, 0x4d, 0x05, 0x97, 0xfc, 0x93, 0x1a, 0x16, 0xa9, 0x94, 0x05, 0x2d, 0x87, 0x94, 0x29, - 0x22, 0x18, 0x9e, 0x0d, 0x35, 0xca, 0x28, 0xce, 0x57, 0x41, 0x32, 0x17, 0x5c, 0x71, 0xb4, 0x9b, - 0x72, 0xa6, 0x30, 0x65, 0x44, 0x64, 0x89, 0xa8, 0x58, 0x91, 0xca, 0x64, 0xf1, 0x30, 0xd1, 0x09, - 0xc1, 0x4e, 0xce, 0x73, 0x6e, 0xb2, 0x86, 0x3a, 0xaa, 0x05, 0xd1, 0x77, 0x07, 0xd0, 0xcb, 0x23, - 0x92, 0x8e, 0x04, 0x4f, 0x89, 0x94, 0x63, 0xf2, 0xb9, 0x22, 0x52, 0x21, 0x04, 0x1b, 0x58, 0xe4, - 0xd2, 0x77, 0x06, 0xed, 0xd8, 0x1b, 0x9b, 0x18, 0xf9, 0xb0, 0xf9, 0x85, 0x8b, 0xc3, 0x8c, 0x0a, - 0xff, 0xca, 0xc0, 0x89, 0xbd, 0x71, 0x03, 0x51, 0x00, 0xae, 0x22, 0xa2, 0xa4, 0x0c, 0xcf, 0xfc, - 0xf6, 0xc0, 0x89, 0xdd, 0xf1, 0x0a, 0xa3, 0x1d, 0xe8, 0x48, 0x95, 0x51, 0xe6, 0x6f, 0x18, 0x4d, - 0x0d, 0xd0, 0x4d, 0xe8, 0x4a, 0x95, 0xf1, 0x4a, 0xf9, 0x1d, 0x43, 0x5b, 0x64, 0x79, 0x22, 0x84, - 0xdf, 0x5d, 0xf1, 0x44, 0x88, 0x68, 0x0f, 0x6e, 0x9c, 0xb9, 0xa5, 0x9c, 0x73, 0x26, 0x09, 0xea, - 0x83, 0x47, 0x8e, 0xa8, 0x9a, 0xa6, 0x3c, 0x23, 0xbe, 0x33, 0x70, 0xe2, 0xce, 0xd8, 0xd5, 0xc4, - 0x73, 0x9e, 0x91, 0xe8, 0x1a, 0x5c, 0x9d, 0x28, 0x9c, 
0x1e, 0x36, 0x43, 0x45, 0x6f, 0x60, 0xbb, - 0x21, 0xac, 0xde, 0xb4, 0xd3, 0x8c, 0x11, 0x9b, 0x76, 0x1a, 0xa1, 0xdb, 0xd0, 0xcb, 0xb5, 0x64, - 0x6a, 0x4f, 0xeb, 0x79, 0xb7, 0x0c, 0x57, 0x97, 0x88, 0x52, 0xe8, 0x4d, 0x0a, 0x2c, 0x48, 0xe3, - 0x58, 0x1f, 0xbc, 0x82, 0x4b, 0x35, 0x9d, 0x63, 0x55, 0xd8, 0x6a, 0xae, 0x26, 0x46, 0x58, 0x15, - 0x68, 0x17, 0xdc, 0x6a, 0x51, 0xd6, 0x67, 0xd6, 0xbb, 0x6a, 0x51, 0x9a, 0xa3, 0x3e, 0x78, 0x82, - 0xe0, 0x6c, 0xca, 0xd9, 0xec, 0xb8, 0x31, 0x4f, 0x13, 0xef, 0xd8, 0xec, 0xd8, 0x8c, 0x50, 0x37, - 0xa9, 0x2f, 0x1c, 0xf5, 0x00, 0x46, 0x34, 0x6b, 0x06, 0xba, 0x05, 0x5b, 0x06, 0xd9, 0x69, 0xae, - 0x43, 0x7b, 0x4e, 0x33, 0xeb, 0x83, 0x0e, 0xf7, 0xbe, 0xb5, 0xc1, 0x9d, 0x14, 0xb4, 0x7c, 0x41, - 0x71, 0x8e, 0x38, 0x6c, 0xeb, 0x5f, 0xed, 0xe3, 0x6b, 0xf6, 0x8a, 0x4b, 0x85, 0x1e, 0x24, 0xff, - 0x7d, 0x2e, 0xc9, 0xbf, 0x8f, 0x22, 0x48, 0x2e, 0x9a, 0x6e, 0xef, 0x83, 0x01, 0x74, 0xc3, 0xda, - 0x30, 0x14, 0xaf, 0x51, 0x9f, 0xd9, 0x53, 0x70, 0xff, 0x02, 0x99, 0xb6, 0xc5, 0x47, 0xf0, 0x4c, - 0x0b, 0x6d, 0x12, 0xba, 0xb7, 0x4e, 0xf7, 0xd7, 0xae, 0x82, 0xf8, 0xfc, 0x44, 0x5b, 0x7f, 0x1f, - 0x36, 0x75, 0xfd, 0x11, 0xcd, 0xd0, 0x9d, 0x35, 0xa2, 0x3f, 0x3b, 0x09, 0xee, 0x9e, 0x97, 0x56, - 0x57, 0x7e, 0xf6, 0xfe, 0xe4, 0x34, 0x6c, 0xfd, 0x3c, 0x0d, 0x5b, 0x5f, 0x97, 0xa1, 0x73, 0xb2, - 0x0c, 0x9d, 0x1f, 0xcb, 0xd0, 0xf9, 0xb5, 0x0c, 0x9d, 0x0f, 0x8f, 0x2f, 0xf7, 0x09, 0x78, 0xd2, - 0x04, 0xfb, 0xad, 0x83, 0xae, 0xf9, 0x53, 0x3f, 0xfa, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xb1, - 0xa6, 0x27, 0x46, 0x04, 0x00, 0x00, -} - -func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x2a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x22 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Workdir) > 0 { - i -= len(m.Workdir) - copy(dAtA[i:], m.Workdir) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Workdir))) - i-- - dAtA[i] = 0x12 - } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ExitCode != 0 { - i = encodeVarintShimdiag(dAtA, i, uint64(m.ExitCode)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StacksRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StacksRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StacksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *StacksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StacksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StacksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.GuestStacks) > 0 { - i -= len(m.GuestStacks) - copy(dAtA[i:], m.GuestStacks) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.GuestStacks))) - i-- - dAtA[i] = 0x12 - } - if len(m.Stacks) > 0 { - i -= len(m.Stacks) - copy(dAtA[i:], m.Stacks) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.Stacks))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ShareRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ShareRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ShareRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ReadOnly { - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.UvmPath) > 0 { - i -= len(m.UvmPath) - copy(dAtA[i:], m.UvmPath) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.UvmPath))) - i-- - dAtA[i] = 0x12 - } - if len(m.HostPath) > 0 { - i -= len(m.HostPath) - copy(dAtA[i:], m.HostPath) - i = encodeVarintShimdiag(dAtA, i, uint64(len(m.HostPath))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ShareResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ShareResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ShareResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *PidRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *PidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintShimdiag(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintShimdiag(dAtA []byte, offset int, v uint64) int { - offset -= sovShimdiag(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExecProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovShimdiag(uint64(l)) - } - } - l = len(m.Workdir) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - if m.Terminal { - n += 2 - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitCode != 0 { - n += 1 + sovShimdiag(uint64(m.ExitCode)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StacksRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StacksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Stacks) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - l = len(m.GuestStacks) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ShareRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HostPath) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - l = len(m.UvmPath) - if l > 0 { - n += 1 + l + sovShimdiag(uint64(l)) - } - if m.ReadOnly { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ShareResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidRequest) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShimdiag(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovShimdiag(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozShimdiag(x uint64) (n int) { - return sovShimdiag(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ExecProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessRequest{`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `Workdir:` + fmt.Sprintf("%v", this.Workdir) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessResponse{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StacksRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StacksRequest{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StacksResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StacksResponse{`, - `Stacks:` + fmt.Sprintf("%v", this.Stacks) + `,`, - `GuestStacks:` + fmt.Sprintf("%v", this.GuestStacks) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ShareRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ShareRequest{`, - `HostPath:` + fmt.Sprintf("%v", this.HostPath) + `,`, - `UvmPath:` + fmt.Sprintf("%v", this.UvmPath) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ShareResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ShareResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidRequest{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringShimdiag(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type ShimDiagService interface { - DiagExecInHost(ctx context.Context, req *ExecProcessRequest) (*ExecProcessResponse, error) - DiagStacks(ctx context.Context, req *StacksRequest) (*StacksResponse, error) - 
DiagShare(ctx context.Context, req *ShareRequest) (*ShareResponse, error) - DiagPid(ctx context.Context, req *PidRequest) (*PidResponse, error) -} - -func RegisterShimDiagService(srv *github_com_containerd_ttrpc.Server, svc ShimDiagService) { - srv.Register("containerd.runhcs.v1.diag.ShimDiag", map[string]github_com_containerd_ttrpc.Method{ - "DiagExecInHost": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ExecProcessRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DiagExecInHost(ctx, &req) - }, - "DiagStacks": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StacksRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DiagStacks(ctx, &req) - }, - "DiagShare": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ShareRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DiagShare(ctx, &req) - }, - "DiagPid": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PidRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DiagPid(ctx, &req) - }, - }) -} - -type shimDiagClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewShimDiagClient(client *github_com_containerd_ttrpc.Client) ShimDiagService { - return &shimDiagClient{ - client: client, - } -} - -func (c *shimDiagClient) DiagExecInHost(ctx context.Context, req *ExecProcessRequest) (*ExecProcessResponse, error) { - var resp ExecProcessResponse - if err := c.client.Call(ctx, "containerd.runhcs.v1.diag.ShimDiag", "DiagExecInHost", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *shimDiagClient) DiagStacks(ctx context.Context, req *StacksRequest) (*StacksResponse, error) { - var resp StacksResponse - if err := c.client.Call(ctx, "containerd.runhcs.v1.diag.ShimDiag", "DiagStacks", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *shimDiagClient) DiagShare(ctx context.Context, req *ShareRequest) (*ShareResponse, error) { - var resp ShareResponse - if err := c.client.Call(ctx, "containerd.runhcs.v1.diag.ShimDiag", "DiagShare", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *shimDiagClient) DiagPid(ctx context.Context, req *PidRequest) (*PidResponse, error) { - var resp PidResponse - if err := c.client.Call(ctx, "containerd.runhcs.v1.diag.ShimDiag", "DiagPid", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag 
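For orientation, a minimal sketch of how the generated ShimDiag client above is driven from this repo's test code: GetShim (shown earlier in this package) resolves the shim's named pipe and returns a ttrpc client, NewShimDiagClient wraps that connection, and DiagExecInHost runs a command on the host. The shim name and command below are illustrative placeholders, not values the package prescribes.

package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/shimdiag"
)

func main() {
	ctx := context.Background()

	// Resolve the running shim's named pipe; "example-shim" is illustrative.
	client, err := shimdiag.GetShim("example-shim")
	if err != nil {
		fmt.Println("failed to find shim:", err)
		return
	}
	defer client.Close()

	// Wrap the ttrpc connection with the generated ShimDiag client.
	svc := shimdiag.NewShimDiagClient(client)
	resp, err := svc.DiagExecInHost(ctx, &shimdiag.ExecProcessRequest{
		Args:     []string{"cmd", "/c", "ver"},
		Workdir:  `C:\`,
		Terminal: false,
	})
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	fmt.Println("exit code:", resp.ExitCode)
}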
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workdir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workdir = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) - } - m.ExitCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitCode |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StacksRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StacksRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StacksRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StacksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StacksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StacksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stacks", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stacks = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GuestStacks", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GuestStacks = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShareRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShareRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShareRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UvmPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShimdiag - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShimdiag - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UvmPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShareResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShareResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShareResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShimdiag - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShimdiag(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShimdiag - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipShimdiag(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShimdiag - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShimdiag - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShimdiag - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthShimdiag - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupShimdiag - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthShimdiag - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthShimdiag = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowShimdiag = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupShimdiag = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/capabilities.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/capabilities.go deleted file mode 100644 index 50ac874bce..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/capabilities.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build windows - -package uvm - -import 
"github.com/Microsoft/hcsshim/internal/hcs/schema1" - -// SignalProcessSupported returns `true` if the guest supports the capability to -// signal a process. -// -// This support was added RS5+ guests. -func (uvm *UtilityVM) SignalProcessSupported() bool { - return uvm.guestCaps.SignalProcessSupported -} - -func (uvm *UtilityVM) DeleteContainerStateSupported() bool { - if uvm.gc == nil { - return false - } - return uvm.guestCaps.DeleteContainerStateSupported -} - -// Capabilities returns the protocol version and the guest defined capabilities. -// This should only be used for testing. -func (uvm *UtilityVM) Capabilities() (uint32, schema1.GuestDefinedCapabilities) { - return uvm.protocol, uvm.guestCaps -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go deleted file mode 100644 index 010bac3145..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/clone.go +++ /dev/null @@ -1,141 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cow" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/pkg/errors" -) - -const ( - hcsComputeSystemSaveType = "AsTemplate" - // default namespace ID used for all template and clone VMs. - DefaultCloneNetworkNamespaceID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A" -) - -// Cloneable is a generic interface for cloning a specific resource. Not all resources can -// be cloned and so all resources might not implement this interface. This interface is -// mainly used during late cloning process to clone the resources associated with the UVM -// and the container. For some resources (like scratch VHDs of the UVM & container) -// cloning means actually creating a copy of that resource while for some resources it -// simply means adding that resource to the cloned VM without copying (like VSMB shares). -// The Clone function of that resource will deal with these details. -type Cloneable interface { - // A resource that supports cloning should also support serialization and - // deserialization operations. This is because during resource cloning a resource - // is usually serialized in one process and then deserialized and cloned in some - // other process. Care should be taken while serializing a resource to not include - // any state that will not be valid during the deserialization step. By default - // gob encoding is used to serialize and deserialize resources but a resource can - // implement `gob.GobEncoder` & `gob.GobDecoder` interfaces to provide its own - // serialization and deserialization functions. - - // A SerialVersionID is an identifier used to recognize a unique version of a - // resource. Every time the definition of the resource struct changes this ID is - // bumped up. This ID is used to ensure that we serialize and deserialize the - // same version of a resource. - GetSerialVersionID() uint32 - - // Clone function creates a clone of the resource on the UVM `vm` (i.e adds the - // cloned resource to the `vm`) - // `cd` parameter can be used to pass any other data that is required during the - // cloning process of that resource (for example, when cloning SCSI Mounts we - // might need scratchFolder). - // Clone function should be called on a valid struct (Mostly on the struct which - // is deserialized, and so Clone function should only depend on the fields that - // are exported in the struct). 
- // The implementation of the clone function should avoid reading any data from the - // `vm` struct, it can add new fields to the vm struct but since the vm struct - // isn't fully ready at this point it shouldn't be used to read any data. - Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error -} - -// A struct to keep all the information that might be required during cloning process of -// a resource. -type cloneData struct { - // doc spec for the clone - doc *hcsschema.ComputeSystem - // scratchFolder of the clone - scratchFolder string - // UVMID of the clone - uvmID string -} - -// UVMTemplateConfig is just a wrapper struct that keeps together all the resources that -// need to be saved to create a template. -type UVMTemplateConfig struct { - // ID of the template vm - UVMID string - // Array of all resources that will be required while making a clone from this template - Resources []Cloneable - // The OptionsWCOW used for template uvm creation - CreateOpts OptionsWCOW -} - -// Captures all the information that is necessary to properly save this UVM as a template -// and create clones from this template later. The struct returned by this method must be -// later on made available while creating a clone from this template. -func (uvm *UtilityVM) GenerateTemplateConfig() (*UVMTemplateConfig, error) { - if _, ok := uvm.createOpts.(OptionsWCOW); !ok { - return nil, fmt.Errorf("template config can only be created for a WCOW uvm") - } - - // Add all the SCSI Mounts and VSMB shares into the list of clones - templateConfig := &UVMTemplateConfig{ - UVMID: uvm.ID(), - CreateOpts: uvm.createOpts.(OptionsWCOW), - } - - for _, vsmbShare := range uvm.vsmbDirShares { - templateConfig.Resources = append(templateConfig.Resources, vsmbShare) - } - - for _, vsmbShare := range uvm.vsmbFileShares { - templateConfig.Resources = append(templateConfig.Resources, vsmbShare) - } - - for _, location := range uvm.scsiLocations { - for _, scsiMount := range location { - if scsiMount != nil { - templateConfig.Resources = append(templateConfig.Resources, scsiMount) - } - } - } - - return templateConfig, nil -} - -// Pauses the uvm and then saves it as a template. This uvm can not be restarted or used -// after it is successfully saved. -// uvm must be in the paused state before it can be saved as a template.save call will throw -// an incorrect uvm state exception if uvm is not in the paused state at the time of saving. 
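A minimal sketch of the template flow described above, assuming a WCOW utility VM created elsewhere (GenerateTemplateConfig rejects anything else). SaveAsTemplate pauses and then saves the VM, which cannot be used afterwards; the returned UVMTemplateConfig is what a later clone needs.

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

// saveTemplate captures the clone metadata and then saves the WCOW utility VM
// as a template; after this the VM can no longer be restarted or used.
func saveTemplate(ctx context.Context, vm *uvm.UtilityVM) (*uvm.UVMTemplateConfig, error) {
	// Collect the VSMB shares and SCSI mounts a clone will need later.
	tc, err := vm.GenerateTemplateConfig()
	if err != nil {
		return nil, err
	}
	// Pauses the VM, then saves it with SaveType "AsTemplate".
	if err := vm.SaveAsTemplate(ctx); err != nil {
		return nil, err
	}
	return tc, nil
}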
-func (uvm *UtilityVM) SaveAsTemplate(ctx context.Context) error { - if err := uvm.hcsSystem.Pause(ctx); err != nil { - return errors.Wrap(err, "error pausing the VM") - } - - saveOptions := hcsschema.SaveOptions{ - SaveType: hcsComputeSystemSaveType, - } - if err := uvm.hcsSystem.Save(ctx, saveOptions); err != nil { - return errors.Wrap(err, "error saving the VM") - } - return nil -} - -// CloneContainer attaches back to a container that is already running inside the UVM -// because of the clone -func (uvm *UtilityVM) CloneContainer(ctx context.Context, id string) (cow.Container, error) { - if uvm.gc == nil { - return nil, fmt.Errorf("clone container cannot work without external GCS connection") - } - c, err := uvm.gc.CloneContainer(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to clone container %s: %s", id, err) - } - return c, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go deleted file mode 100644 index 468139c0f7..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -// CombineLayersWCOW combines `layerPaths` with `containerRootPath` into the -// container file system. -// -// Note: `layerPaths` and `containerRootPath` are paths from within the UVM. -func (uvm *UtilityVM) CombineLayersWCOW(ctx context.Context, layerPaths []hcsschema.Layer, containerRootPath string) error { - if uvm.operatingSystem != "windows" { - return errNotSupported - } - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeCombinedLayers, - RequestType: guestrequest.RequestTypeAdd, - Settings: guestresource.WCOWCombinedLayers{ - ContainerRootPath: containerRootPath, - Layers: layerPaths, - }, - }, - } - return uvm.modify(ctx, msr) -} - -// CombineLayersLCOW combines `layerPaths` and optionally `scratchPath` into an -// overlay filesystem at `rootfsPath`. If `scratchPath` is empty the overlay -// will be read only. -// -// NOTE: `layerPaths`, `scrathPath`, and `rootfsPath` are paths from within the -// UVM. -func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerID string, layerPaths []string, scratchPath, rootfsPath string) error { - if uvm.operatingSystem != "linux" { - return errNotSupported - } - - var layers []hcsschema.Layer - for _, l := range layerPaths { - layers = append(layers, hcsschema.Layer{Path: l}) - } - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeCombinedLayers, - RequestType: guestrequest.RequestTypeAdd, - Settings: guestresource.LCOWCombinedLayers{ - ContainerID: containerID, - ContainerRootPath: rootfsPath, - Layers: layers, - ScratchPath: scratchPath, - }, - }, - } - return uvm.modify(ctx, msr) -} - -// RemoveCombinedLayers removes the previously combined layers at `rootfsPath`. -// -// NOTE: `rootfsPath` is the path from within the UVM. 
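A minimal sketch of the LCOW variant described above; the guest-side paths are placeholders, not values the package prescribes, and RemoveCombinedLayersLCOW (below) is the matching teardown call.

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

// mountRootfs overlays read-only layer mounts with a scratch layer inside an
// LCOW utility VM; all paths are inside the guest and purely illustrative.
func mountRootfs(ctx context.Context, vm *uvm.UtilityVM, containerID string) error {
	layers := []string{"/run/layers/0", "/run/layers/1"}
	scratch := "/run/scratch/" + containerID // pass "" for a read-only overlay
	rootfs := "/run/rootfs/" + containerID

	// RemoveCombinedLayersLCOW(ctx, rootfs) undoes this mount when the container is done.
	return vm.CombineLayersLCOW(ctx, containerID, layers, scratch, rootfs)
}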
-func (uvm *UtilityVM) RemoveCombinedLayersWCOW(ctx context.Context, rootfsPath string) error { - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeCombinedLayers, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.WCOWCombinedLayers{ - ContainerRootPath: rootfsPath, - }, - }, - } - return uvm.modify(ctx, msr) -} - -func (uvm *UtilityVM) RemoveCombinedLayersLCOW(ctx context.Context, rootfsPath string) error { - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeCombinedLayers, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.LCOWCombinedLayers{ - ContainerRootPath: rootfsPath, - }, - }, - } - return uvm.modify(ctx, msr) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/computeagent.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/computeagent.go deleted file mode 100644 index 44b328ad37..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/computeagent.go +++ /dev/null @@ -1,259 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/containerd/ttrpc" - "github.com/containerd/typeurl" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/Microsoft/hcsshim/hcn" - "github.com/Microsoft/hcsshim/internal/computeagent" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/hns" - "github.com/Microsoft/hcsshim/internal/log" - ncproxynetworking "github.com/Microsoft/hcsshim/internal/ncproxy/networking" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/pkg/octtrpc" -) - -func init() { - typeurl.Register(&ncproxynetworking.Endpoint{}, "ncproxy/ncproxynetworking/Endpoint") - typeurl.Register(&ncproxynetworking.Network{}, "ncproxy/ncproxynetworking/Network") - typeurl.Register(&hcn.HostComputeEndpoint{}, "ncproxy/hcn/HostComputeEndpoint") - typeurl.Register(&hcn.HostComputeNetwork{}, "ncproxy/hcn/HostComputeNetwork") -} - -// This file holds the implementation of the Compute Agent service that is exposed for -// external network configuration. - -const ComputeAgentAddrFmt = "\\\\.\\pipe\\computeagent-%s" - -// create an interface here so we can mock out calls to the UtilityVM in our tests -type agentComputeSystem interface { - AddEndpointToNSWithID(context.Context, string, string, *hns.HNSEndpoint) error - UpdateNIC(context.Context, string, *hcsschema.NetworkAdapter) error - RemoveEndpointFromNS(context.Context, string, *hns.HNSEndpoint) error - AssignDevice(context.Context, string, uint16, string) (*VPCIDevice, error) - RemoveDevice(context.Context, string, uint16) error - AddNICInGuest(context.Context, *guestresource.LCOWNetworkAdapter) error - RemoveNICInGuest(context.Context, *guestresource.LCOWNetworkAdapter) error -} - -var _ agentComputeSystem = &UtilityVM{} - -// mock hcn function for tests -var hnsGetHNSEndpointByName = hns.GetHNSEndpointByName - -// computeAgent implements the ComputeAgent ttrpc service for adding and deleting NICs to a -// Utility VM. 
-type computeAgent struct { - uvm agentComputeSystem -} - -var _ computeagent.ComputeAgentService = &computeAgent{} - -func (ca *computeAgent) AssignPCI(ctx context.Context, req *computeagent.AssignPCIInternalRequest) (*computeagent.AssignPCIInternalResponse, error) { - log.G(ctx).WithFields(logrus.Fields{ - "containerID": req.ContainerID, - "deviceID": req.DeviceID, - "virtualFunctionIndex": req.VirtualFunctionIndex, - }).Info("AssignPCI request") - - if req.DeviceID == "" { - return nil, status.Error(codes.InvalidArgument, "received empty field in request") - } - - dev, err := ca.uvm.AssignDevice(ctx, req.DeviceID, uint16(req.VirtualFunctionIndex), req.NicID) - if err != nil { - return nil, err - } - return &computeagent.AssignPCIInternalResponse{ID: dev.VMBusGUID}, nil -} - -func (ca *computeAgent) RemovePCI(ctx context.Context, req *computeagent.RemovePCIInternalRequest) (*computeagent.RemovePCIInternalResponse, error) { - log.G(ctx).WithFields(logrus.Fields{ - "containerID": req.ContainerID, - "deviceID": req.DeviceID, - }).Info("RemovePCI request") - - if req.DeviceID == "" { - return nil, status.Error(codes.InvalidArgument, "received empty field in request") - } - if err := ca.uvm.RemoveDevice(ctx, req.DeviceID, uint16(req.VirtualFunctionIndex)); err != nil { - return nil, err - } - return &computeagent.RemovePCIInternalResponse{}, nil -} - -// AddNIC will add a NIC to the computeagent services hosting UVM. -func (ca *computeAgent) AddNIC(ctx context.Context, req *computeagent.AddNICInternalRequest) (*computeagent.AddNICInternalResponse, error) { - log.G(ctx).WithFields(logrus.Fields{ - "containerID": req.ContainerID, - "endpoint": req.Endpoint, - "nicID": req.NicID, - }).Info("AddNIC request") - - if req.NicID == "" || req.Endpoint == nil { - return nil, status.Error(codes.InvalidArgument, "received empty field in request") - } - - endpoint, err := typeurl.UnmarshalAny(req.Endpoint) - if err != nil { - return nil, err - } - - switch endpt := endpoint.(type) { - case *ncproxynetworking.Endpoint: - cfg := &guestresource.LCOWNetworkAdapter{ - NamespaceID: endpt.NamespaceID, - ID: req.NicID, - IPAddress: endpt.Settings.IPAddress, - PrefixLength: uint8(endpt.Settings.IPAddressPrefixLength), - GatewayAddress: endpt.Settings.DefaultGateway, - VPCIAssigned: true, - } - if err := ca.uvm.AddNICInGuest(ctx, cfg); err != nil { - return nil, err - } - case *hcn.HostComputeEndpoint: - hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) - if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", endpt.Name) - } - if err := ca.uvm.AddEndpointToNSWithID(ctx, hnsEndpoint.Namespace.ID, req.NicID, hnsEndpoint); err != nil { - return nil, err - } - default: - return nil, status.Error(codes.InvalidArgument, "invalid request endpoint type") - } - - return &computeagent.AddNICInternalResponse{}, nil -} - -// ModifyNIC will modify a NIC from the computeagent services hosting UVM. 
-func (ca *computeAgent) ModifyNIC(ctx context.Context, req *computeagent.ModifyNICInternalRequest) (*computeagent.ModifyNICInternalResponse, error) { - log.G(ctx).WithFields(logrus.Fields{ - "nicID": req.NicID, - "endpoint": req.Endpoint, - }).Info("ModifyNIC request") - - if req.NicID == "" || req.Endpoint == nil || req.IovPolicySettings == nil { - return nil, status.Error(codes.InvalidArgument, "received empty field in request") - } - - endpoint, err := typeurl.UnmarshalAny(req.Endpoint) - if err != nil { - return nil, err - } - - switch endpt := endpoint.(type) { - case *ncproxynetworking.Endpoint: - return nil, errors.New("modifying ncproxy networking endpoints is not supported") - case *hcn.HostComputeEndpoint: - hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) - if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", endpt.Name) - } - - moderationValue := hcsschema.InterruptModerationValue(req.IovPolicySettings.InterruptModeration) - moderationName := hcsschema.InterruptModerationValueToName[moderationValue] - - iovSettings := &hcsschema.IovSettings{ - OffloadWeight: &req.IovPolicySettings.IovOffloadWeight, - QueuePairsRequested: &req.IovPolicySettings.QueuePairsRequested, - InterruptModeration: &moderationName, - } - - nic := &hcsschema.NetworkAdapter{ - EndpointId: hnsEndpoint.Id, - MacAddress: hnsEndpoint.MacAddress, - IovSettings: iovSettings, - } - - if err := ca.uvm.UpdateNIC(ctx, req.NicID, nic); err != nil { - return nil, errors.Wrap(err, "failed to update UVM's network adapter") - } - default: - return nil, status.Error(codes.InvalidArgument, "invalid request endpoint type") - } - - return &computeagent.ModifyNICInternalResponse{}, nil -} - -// DeleteNIC will delete a NIC from the computeagent services hosting UVM. 
-func (ca *computeAgent) DeleteNIC(ctx context.Context, req *computeagent.DeleteNICInternalRequest) (*computeagent.DeleteNICInternalResponse, error) { - log.G(ctx).WithFields(logrus.Fields{ - "containerID": req.ContainerID, - "nicID": req.NicID, - "endpoint": req.Endpoint, - }).Info("DeleteNIC request") - - if req.NicID == "" || req.Endpoint == nil { - return nil, status.Error(codes.InvalidArgument, "received empty field in request") - } - - endpoint, err := typeurl.UnmarshalAny(req.Endpoint) - if err != nil { - return nil, err - } - - switch endpt := endpoint.(type) { - case *ncproxynetworking.Endpoint: - cfg := &guestresource.LCOWNetworkAdapter{ - ID: req.NicID, - } - if err := ca.uvm.RemoveNICInGuest(ctx, cfg); err != nil { - return nil, err - } - case *hcn.HostComputeEndpoint: - hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) - if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", endpt.Name) - } - if err := ca.uvm.RemoveEndpointFromNS(ctx, hnsEndpoint.Namespace.ID, hnsEndpoint); err != nil { - return nil, err - } - default: - return nil, status.Error(codes.InvalidArgument, "invalid request endpoint type") - } - - return &computeagent.DeleteNICInternalResponse{}, nil -} - -func setupAndServe(ctx context.Context, caAddr string, vm *UtilityVM) error { - // Setup compute agent service - l, err := winio.ListenPipe(caAddr, nil) - if err != nil { - return errors.Wrapf(err, "failed to listen on %s", caAddr) - } - s, err := ttrpc.NewServer(ttrpc.WithUnaryServerInterceptor(octtrpc.ServerInterceptor())) - if err != nil { - return err - } - computeagent.RegisterComputeAgentService(s, &computeAgent{vm}) - - log.G(ctx).WithField("address", l.Addr().String()).Info("serving compute agent") - go func() { - defer l.Close() - if err := trapClosedConnErr(s.Serve(ctx, l)); err != nil { - log.G(ctx).WithError(err).Fatal("compute agent: serve failure") - } - }() - - return nil -} - -func trapClosedConnErr(err error) error { - if err == nil || strings.Contains(err.Error(), "use of closed network connection") { - return nil - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go deleted file mode 100644 index aebd58a391..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package uvm - -import ( - "errors" - - "github.com/Microsoft/hcsshim/internal/memory" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" -) - -const ( - // MaxVPMEMCount is the maximum number of VPMem devices that may be added to an LCOW - // utility VM - MaxVPMEMCount = 128 - - // DefaultVPMEMCount is the default number of VPMem devices that may be added to an LCOW - // utility VM if the create request doesn't specify how many. - DefaultVPMEMCount = 64 - - // DefaultVPMemSizeBytes is the default size of a VPMem device if the create request - // doesn't specify. 
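// A hypothetical sketch, not taken from the vendored file: how an external component (for
// example ncproxy) could reach the compute agent service that setupAndServe exposes above.
// It assumes the ttrpc client constructor generated alongside the service
// (computeagent.NewComputeAgentClient); the UVM ID, NIC ID, endpoint value and timeout are
// placeholders.
//
//	addr := fmt.Sprintf(ComputeAgentAddrFmt, "<uvm-id>")
//	timeout := 10 * time.Second
//	conn, err := winio.DialPipe(addr, &timeout)
//	if err != nil {
//		return err
//	}
//	client := computeagent.NewComputeAgentClient(ttrpc.NewClient(conn))
//	defer client.Close()
//	endpointAny, err := typeurl.MarshalAny(hcnEndpoint) // any endpoint type registered in init() above
//	if err != nil {
//		return err
//	}
//	_, err = client.AddNIC(ctx, &computeagent.AddNICInternalRequest{
//		ContainerID: "<container-id>",
//		NicID:       "<nic-guid>",
//		Endpoint:    endpointAny,
//	})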
- DefaultVPMemSizeBytes = 4 * memory.GiB // 4GB -) - -var ( - errNotSupported = errors.New("not supported") - errBadUVMOpts = errors.New("UVM options incorrect") - - // Maximum number of SCSI controllers allowed - MaxSCSIControllers = uint32(len(guestrequest.ScsiControllerGuids)) -) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go deleted file mode 100644 index fd49be9bfc..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build windows - -package uvm - -import ( - "sync/atomic" -) - -// ContainerCounter is used for where we layout things for a container in -// a utility VM. For WCOW it'll be C:\c\N\. For LCOW it'll be /run/gcs/c/N/. -func (uvm *UtilityVM) ContainerCounter() uint64 { - return atomic.AddUint64(&uvm.containerCounter, 1) -} - -// mountCounter is used for maintaining the number of mounts to the UVM. -// This helps in generating unique mount paths for every mount. -func (uvm *UtilityVM) UVMMountCounter() uint64 { - return atomic.AddUint64(&uvm.mountCounter, 1) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpugroups.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpugroups.go deleted file mode 100644 index f93a83ca6e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpugroups.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cpugroup" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/osversion" -) - -var errCPUGroupCreateNotSupported = fmt.Errorf("cpu group assignment on create requires a build of %d or higher", osversion.V21H1) - -// ReleaseCPUGroup unsets the cpugroup from the VM -func (uvm *UtilityVM) ReleaseCPUGroup(ctx context.Context) error { - if err := uvm.unsetCPUGroup(ctx); err != nil { - return fmt.Errorf("failed to remove VM %s from cpugroup", uvm.ID()) - } - return nil -} - -// SetCPUGroup setups up the cpugroup for the VM with the requested id -func (uvm *UtilityVM) SetCPUGroup(ctx context.Context, id string) error { - if id == "" { - return errors.New("must specify an ID to use when configuring a VM's cpugroup") - } - return uvm.setCPUGroup(ctx, id) -} - -// setCPUGroup sets the VM's cpugroup -func (uvm *UtilityVM) setCPUGroup(ctx context.Context, id string) error { - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.CPUGroupResourcePath, - Settings: &hcsschema.CpuGroup{ - Id: id, - }, - } - if err := uvm.modify(ctx, req); err != nil { - return err - } - return nil -} - -// unsetCPUGroup sets the VM's cpugroup to the null group ID -// set groupID to 00000000-0000-0000-0000-000000000000 to remove the VM from a cpugroup -// -// Since a VM must be moved to the null group before potentially being added to a different -// cpugroup, that means there may be a segment of time that the VM's cpu usage runs unrestricted. 
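// A hypothetical sketch, not taken from the vendored file: typical use of the cpugroup helpers
// defined above, assuming a running *UtilityVM `vm` and a cpugroup that already exists on the
// host (the GUID is a placeholder).
//
//	if err := vm.SetCPUGroup(ctx, "<cpugroup-guid>"); err != nil {
//		return err
//	}
//	// ... run the workload ...
//	// Moving back to cpugroup.NullGroupID detaches the VM from the group.
//	if err := vm.ReleaseCPUGroup(ctx); err != nil {
//		return err
//	}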
-func (uvm *UtilityVM) unsetCPUGroup(ctx context.Context) error { - return uvm.setCPUGroup(ctx, cpugroup.NullGroupID) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpulimits_update.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpulimits_update.go deleted file mode 100644 index ea5fccf625..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/cpulimits_update.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// UpdateCPULimits updates the CPU limits of the utility vm -func (uvm *UtilityVM) UpdateCPULimits(ctx context.Context, limits *hcsschema.ProcessorLimits) error { - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.CPULimitsResourcePath, - Settings: limits, - } - - return uvm.modify(ctx, req) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go deleted file mode 100644 index 9b36372ef0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go +++ /dev/null @@ -1,408 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/schemaversion" - "github.com/Microsoft/hcsshim/osversion" -) - -// Options are the set of options passed to Create() to create a utility vm. -type Options struct { - ID string // Identifier for the uvm. Defaults to generated GUID. - Owner string // Specifies the owner. Defaults to executable name. - - // MemorySizeInMB sets the UVM memory. If `0` will default to platform - // default. - MemorySizeInMB uint64 - - LowMMIOGapInMB uint64 - HighMMIOBaseInMB uint64 - HighMMIOGapInMB uint64 - - // Memory for UVM. Defaults to true. For physical backed memory, set to - // false. - AllowOvercommit bool - - // FullyPhysicallyBacked describes if a uvm should be entirely physically - // backed, including in any additional devices - FullyPhysicallyBacked bool - - // Memory for UVM. Defaults to false. For virtual memory with deferred - // commit, set to true. - EnableDeferredCommit bool - - // ProcessorCount sets the number of vCPU's. If `0` will default to platform - // default. - ProcessorCount int32 - - // ProcessorLimit sets the maximum percentage of each vCPU's the UVM can - // consume. If `0` will default to platform default. - ProcessorLimit int32 - - // ProcessorWeight sets the relative weight of these vCPU's vs another UVM's - // when scheduling. If `0` will default to platform default. - ProcessorWeight int32 - - // StorageQoSIopsMaximum sets the maximum number of Iops. If `0` will - // default to the platform default. - StorageQoSIopsMaximum int32 - - // StorageQoSIopsMaximum sets the maximum number of bytes per second. If `0` - // will default to the platform default. - StorageQoSBandwidthMaximum int32 - - // DisableCompartmentNamespace sets whether to disable namespacing the network compartment in the UVM - // for WCOW. 
Namespacing makes it so the compartment created for a container is essentially no longer - // aware or able to see any of the other compartments on the host (in this case the UVM). - // The compartment that the container is added to now behaves as the default compartment as - // far as the container is concerned and it is only able to view the NICs in the compartment it's assigned to. - // This is the compartment setup (and behavior) that is followed for V1 HCS schema containers (docker) so - // this change brings parity as well. This behavior is gated behind a registry key currently to avoid any - // unnecessary behavior and once this restriction is removed then we can remove the need for this variable - // and the associated annotation as well. - DisableCompartmentNamespace bool - - // CPUGroupID set the ID of a CPUGroup on the host that the UVM should be added to on start. - // Defaults to an empty string which indicates the UVM should not be added to any CPUGroup. - CPUGroupID string - // NetworkConfigProxy holds the address of the network config proxy service. - // This != "" determines whether to start the ComputeAgent TTRPC service - // that receives the UVMs set of NICs from this proxy instead of enumerating - // the endpoints locally. - NetworkConfigProxy string - - // Sets the location for process dumps to be placed in. On Linux this is a kernel setting so it will be - // applied to all containers. On Windows it's configurable per container, but we can mimic this for - // Windows by just applying the location specified here per container. - ProcessDumpLocation string - - // NoWritableFileShares disables adding any writable vSMB and Plan9 shares to the UVM - NoWritableFileShares bool - - // The number of SCSI controllers. Defaults to 1 for WCOW and 4 for LCOW - SCSIControllerCount uint32 -} - -// compares the create opts used during template creation with the create opts -// provided for clone creation. If they don't match (except for a few fields) -// then clone creation is failed. -func verifyCloneUvmCreateOpts(templateOpts, cloneOpts *OptionsWCOW) bool { - // Following fields can be different in the template and clone configurations. - // 1. the scratch layer path. i.e the last element of the LayerFolders path. - // 2. IsTemplate, IsClone and TemplateConfig variables. - // 3. ID - // 4. AdditionalHCSDocumentJSON - - // Save the original values of the fields that we want to ignore and replace them with - // the same values as that of the other object. So that we can simply use `==` operator. - templateIDBackup := templateOpts.ID - templateOpts.ID = cloneOpts.ID - - // We can't use `==` operator on structs which include slices in them. So compare the - // Layerfolders separately and then directly compare the Options struct. - result := (len(templateOpts.LayerFolders) == len(cloneOpts.LayerFolders)) - for i := 0; result && i < len(templateOpts.LayerFolders)-1; i++ { - result = result && (templateOpts.LayerFolders[i] == cloneOpts.LayerFolders[i]) - } - result = result && (*templateOpts.Options == *cloneOpts.Options) - - // set original values - templateOpts.ID = templateIDBackup - return result -} - -// Verifies that the final UVM options are correct and supported. 
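// A hypothetical sketch, not taken from the vendored file: verifyOptions below is what
// CreateLCOW runs against the final options, so inconsistent combinations fail before any
// compute system is created. For example, from within this package (ctx is a placeholder):
//
//	opts := NewDefaultOptionsLCOW("", "")
//	opts.AllowOvercommit = false     // ask for physically backed memory...
//	opts.EnableDeferredCommit = true // ...while also asking for deferred commit
//	_, err := CreateLCOW(ctx, opts)
//	// err wraps "EnableDeferredCommit is not supported on physically backed VMs".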
-func verifyOptions(ctx context.Context, options interface{}) error { - switch opts := options.(type) { - case *OptionsLCOW: - if opts.EnableDeferredCommit && !opts.AllowOvercommit { - return errors.New("EnableDeferredCommit is not supported on physically backed VMs") - } - if opts.SCSIControllerCount > MaxSCSIControllers { - return fmt.Errorf("SCSI controller count can't be more than %d", MaxSCSIControllers) - } - if opts.VPMemDeviceCount > MaxVPMEMCount { - return fmt.Errorf("VPMem device count cannot be greater than %d", MaxVPMEMCount) - } - if opts.VPMemDeviceCount > 0 { - if opts.VPMemSizeBytes%4096 != 0 { - return errors.New("VPMemSizeBytes must be a multiple of 4096") - } - } - if opts.KernelDirect && osversion.Build() < 18286 { - return errors.New("KernelDirectBoot is not supported on builds older than 18286") - } - - if opts.EnableColdDiscardHint && osversion.Build() < 18967 { - return errors.New("EnableColdDiscardHint is not supported on builds older than 18967") - } - case *OptionsWCOW: - if opts.EnableDeferredCommit && !opts.AllowOvercommit { - return errors.New("EnableDeferredCommit is not supported on physically backed VMs") - } - if len(opts.LayerFolders) < 2 { - return errors.New("at least 2 LayerFolders must be supplied") - } - if opts.SCSIControllerCount != 1 { - return errors.New("exactly 1 SCSI controller is required for WCOW") - } - if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { - return errors.New("clone configuration doesn't match with template configuration") - } - if opts.IsClone && opts.TemplateConfig == nil { - return errors.New("template config can not be nil when creating clone") - } - if opts.IsTemplate && opts.FullyPhysicallyBacked { - return errors.New("template can not be created from a full physically backed UVM") - } - } - return nil -} - -// newDefaultOptions returns the default base options for WCOW and LCOW. -// -// If `id` is empty it will be generated. -// -// If `owner` is empty it will be set to the calling executables name. -func newDefaultOptions(id, owner string) *Options { - opts := &Options{ - ID: id, - Owner: owner, - MemorySizeInMB: 1024, - AllowOvercommit: true, - EnableDeferredCommit: false, - ProcessorCount: defaultProcessorCount(), - FullyPhysicallyBacked: false, - NoWritableFileShares: false, - SCSIControllerCount: 1, - } - - if opts.Owner == "" { - opts.Owner = filepath.Base(os.Args[0]) - } - - return opts -} - -// ID returns the ID of the VM's compute system. -func (uvm *UtilityVM) ID() string { - return uvm.hcsSystem.ID() -} - -// OS returns the operating system of the utility VM. -func (uvm *UtilityVM) OS() string { - return uvm.operatingSystem -} - -func (uvm *UtilityVM) create(ctx context.Context, doc interface{}) error { - uvm.exitCh = make(chan struct{}) - system, err := hcs.CreateComputeSystem(ctx, uvm.id, doc) - if err != nil { - return err - } - defer func() { - if system != nil { - _ = system.Terminate(ctx) - _ = system.Wait() - } - }() - - // Cache the VM ID of the utility VM. - properties, err := system.Properties(ctx) - if err != nil { - return err - } - uvm.runtimeID = properties.RuntimeID - uvm.hcsSystem = system - system = nil - - log.G(ctx).WithFields(logrus.Fields{ - logfields.UVMID: uvm.id, - "runtime-id": uvm.runtimeID.String(), - }).Debug("created utility VM") - return nil -} - -// Close terminates and releases resources associated with the utility VM. 
-func (uvm *UtilityVM) Close() (err error) { - ctx, span := oc.StartSpan(context.Background(), "uvm::Close") - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute(logfields.UVMID, uvm.id)) - - windows.Close(uvm.vmmemProcess) - - if uvm.hcsSystem != nil { - _ = uvm.hcsSystem.Terminate(ctx) - _ = uvm.Wait() - } - - if err := uvm.CloseGCSConnection(); err != nil { - log.G(ctx).Errorf("close GCS connection failed: %s", err) - } - - // outputListener will only be nil for a Create -> Stop without a Start. In - // this case we have no goroutine processing output so its safe to close the - // channel here. - if uvm.outputListener != nil { - close(uvm.outputProcessingDone) - uvm.outputListener.Close() - uvm.outputListener = nil - } - if uvm.hcsSystem != nil { - return uvm.hcsSystem.Close() - } - return nil -} - -// CreateContainer creates a container in the utility VM. -func (uvm *UtilityVM) CreateContainer(ctx context.Context, id string, settings interface{}) (cow.Container, error) { - if uvm.gc != nil { - c, err := uvm.gc.CreateContainer(ctx, id, settings) - if err != nil { - return nil, fmt.Errorf("failed to create container %s: %s", id, err) - } - return c, nil - } - doc := hcsschema.ComputeSystem{ - HostingSystemId: uvm.id, - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - HostedSystem: settings, - } - c, err := hcs.CreateComputeSystem(ctx, id, &doc) - if err != nil { - return nil, err - } - return c, err -} - -// CreateProcess creates a process in the utility VM. -func (uvm *UtilityVM) CreateProcess(ctx context.Context, settings interface{}) (cow.Process, error) { - if uvm.gc != nil { - return uvm.gc.CreateProcess(ctx, settings) - } - return uvm.hcsSystem.CreateProcess(ctx, settings) -} - -// IsOCI returns false, indicating the parameters to CreateProcess should not -// include an OCI spec. -func (uvm *UtilityVM) IsOCI() bool { - return false -} - -// Terminate requests that the utility VM be terminated. -func (uvm *UtilityVM) Terminate(ctx context.Context) error { - return uvm.hcsSystem.Terminate(ctx) -} - -// ExitError returns an error if the utility VM has terminated unexpectedly. -func (uvm *UtilityVM) ExitError() error { - return uvm.hcsSystem.ExitError() -} - -func defaultProcessorCount() int32 { - if runtime.NumCPU() == 1 { - return 1 - } - return 2 -} - -// normalizeProcessorCount sets `uvm.processorCount` to `Min(requested, -// logical CPU count)`. -func (uvm *UtilityVM) normalizeProcessorCount(ctx context.Context, requested int32, processorTopology *hcsschema.ProcessorTopology) int32 { - // Use host processor information retrieved from HCS instead of runtime.NumCPU, - // GetMaximumProcessorCount or other OS level calls for two reasons. - // 1. Go uses GetProcessAffinityMask and falls back to GetSystemInfo both of - // which will not return LPs in another processor group. - // 2. GetMaximumProcessorCount will return all processors on the system - // but in configurations where the host partition doesn't see the full LP count - // i.e "Minroot" scenarios this won't be sufficient. 
- // (https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/manage/manage-hyper-v-minroot-2016) - hostCount := int32(processorTopology.LogicalProcessorCount) - if requested > hostCount { - log.G(ctx).WithFields(logrus.Fields{ - logfields.UVMID: uvm.id, - "requested": requested, - "assigned": hostCount, - }).Warn("Changing user requested CPUCount to current number of processors") - return hostCount - } else { - return requested - } -} - -// ProcessorCount returns the number of processors actually assigned to the UVM. -func (uvm *UtilityVM) ProcessorCount() int32 { - return uvm.processorCount -} - -// PhysicallyBacked returns if the UVM is backed by physical memory -// (Over commit and deferred commit both false) -func (uvm *UtilityVM) PhysicallyBacked() bool { - return uvm.physicallyBacked -} - -// ProcessDumpLocation returns the location that process dumps will get written to for containers running -// in the UVM. -func (uvm *UtilityVM) ProcessDumpLocation() string { - return uvm.processDumpLocation -} - -func (uvm *UtilityVM) normalizeMemorySize(ctx context.Context, requested uint64) uint64 { - actual := (requested + 1) &^ 1 // align up to an even number - if requested != actual { - log.G(ctx).WithFields(logrus.Fields{ - logfields.UVMID: uvm.id, - "requested": requested, - "assigned": actual, - }).Warn("Changing user requested MemorySizeInMB to align to 2MB") - } - return actual -} - -// DevicesPhysicallyBacked describes if additional devices added to the UVM -// should be physically backed -func (uvm *UtilityVM) DevicesPhysicallyBacked() bool { - return uvm.devicesPhysicallyBacked -} - -// VSMBNoDirectMap returns if VSMB devices should be mounted with `NoDirectMap` set to true -func (uvm *UtilityVM) VSMBNoDirectMap() bool { - return uvm.vsmbNoDirectMap -} - -func (uvm *UtilityVM) NoWritableFileShares() bool { - return uvm.noWritableFileShares -} - -// Closes the external GCS connection if it is being used and also closes the -// listener for GCS connection. -func (uvm *UtilityVM) CloseGCSConnection() (err error) { - if uvm.gc != nil { - err = uvm.gc.Close() - } - if uvm.gcListener != nil { - err = uvm.gcListener.Close() - } - return -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go deleted file mode 100644 index 3792307ab5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go +++ /dev/null @@ -1,819 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "encoding/base64" - "fmt" - "io" - "net" - "os" - "path/filepath" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/pkg/securitypolicy" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" - - "github.com/Microsoft/hcsshim/internal/gcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/processorinfo" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/schemaversion" - "github.com/Microsoft/hcsshim/osversion" -) - -// General information about how this works at a high level. 
-// -// The purpose is to start an LCOW Utility VM or UVM using the Host Compute Service, an API to create and manipulate running virtual machines -// HCS takes json descriptions of the work to be done. -// -// When a pod (there is a one to one mapping of pod to UVM) is to be created various annotations and defaults are combined into an options object which is -// passed to CreateLCOW (see below) where the options are transformed into a json document to be presented to the HCS VM creation code. -// -// There are two paths in CreateLCOW to creating the json document. The most flexible case is makeLCOWDoc which is used where no specialist hardware security -// applies, then there is makeLCOWSecurityDoc which is used in the case of AMD SEV-SNP memory encryption and integrity protection. There is quite -// a lot of difference between the two paths, for example the regular path has options about the type of kernel and initrd binary whereas the AMD SEV-SNP -// path has only one file but there are many other detail differences, so the code is split for clarity. -// -// makeLCOW*Doc returns an instance of hcsschema.ComputeSystem. That is then serialised to the json string provided to the flat C api. A similar scheme is used -// for later adjustments, for example adding a newtwork adpator. -// -// Examples of the eventual json are inline as comments by these two functions to show the eventual effect of the code. -// -// Note that the schema files, ie the Go objects that represent the json, are generated outside of the local build process. - -type PreferredRootFSType int - -const ( - PreferredRootFSTypeInitRd PreferredRootFSType = iota - PreferredRootFSTypeVHD - PreferredRootFSTypeNA - - entropyVsockPort = 1 - linuxLogVsockPort = 109 -) - -// OutputHandler is used to process the output from the program run in the UVM. -type OutputHandler func(io.Reader) - -const ( - // InitrdFile is the default file name for an initrd.img used to boot LCOW. - InitrdFile = "initrd.img" - // VhdFile is the default file name for a rootfs.vhd used to boot LCOW. - VhdFile = "rootfs.vhd" - // KernelFile is the default file name for a kernel used to boot LCOW. - KernelFile = "kernel" - // UncompressedKernelFile is the default file name for an uncompressed - // kernel used to boot LCOW with KernelDirect. - UncompressedKernelFile = "vmlinux" - // In the SNP case both the kernel (bzImage) and initrd are stored in a vmgs (VM Guest State) file - GuestStateFile = "kernelinitrd.vmgs" -) - -// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm. -type OptionsLCOW struct { - *Options - - BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers - KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel` - KernelDirect bool // Skip UEFI and boot directly to `kernel` - RootFSFile string // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile` - KernelBootOptions string // Additional boot options for the kernel - EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM - ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe - UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true - ExecCommandLine string // The command line to exec from init. Defaults to GCS - ForwardStdout bool // Whether stdout will be forwarded from the executed program. 
Defaults to false - ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true - OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages - VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken. - VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. - VPMemNoMultiMapping bool // Disables LCOW layer multi mapping - PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` - EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. Defaults to false - VPCIEnabled bool // Whether the kernel should enable pci - EnableScratchEncryption bool // Whether the scratch should be encrypted - SecurityPolicy string // Optional security policy - SecurityPolicyEnabled bool // Set when there is a security policy to apply on actual SNP hardware, use this rathen than checking the string length - UseGuestStateFile bool // Use a vmgs file that contains a kernel and initrd, required for SNP - GuestStateFile string // The vmgs file to load - DisableTimeSyncService bool // Disables the time synchronization service -} - -// defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW -// OS kernel and root FS files. This default is the subdirectory -// `LinuxBootFiles` in the directory of the executable that started the current -// process; or, if it does not exist, `%ProgramFiles%\Linux Containers`. -func defaultLCOWOSBootFilesPath() string { - localDirPath := filepath.Join(filepath.Dir(os.Args[0]), "LinuxBootFiles") - if _, err := os.Stat(localDirPath); err == nil { - return localDirPath - } - return filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers") -} - -// NewDefaultOptionsLCOW creates the default options for a bootable version of -// LCOW. -// -// `id` the ID of the compute system. If not passed will generate a new GUID. -// -// `owner` the owner of the compute system. If not passed will use the -// executable files name. -func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { - // Use KernelDirect boot by default on all builds that support it. - kernelDirectSupported := osversion.Build() >= 18286 - opts := &OptionsLCOW{ - Options: newDefaultOptions(id, owner), - BootFilesPath: defaultLCOWOSBootFilesPath(), - KernelFile: KernelFile, - KernelDirect: kernelDirectSupported, - RootFSFile: InitrdFile, - KernelBootOptions: "", - EnableGraphicsConsole: false, - ConsolePipe: "", - UseGuestConnection: true, - ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), - ForwardStdout: false, - ForwardStderr: true, - OutputHandler: parseLogrus(id), - VPMemDeviceCount: DefaultVPMEMCount, - VPMemSizeBytes: DefaultVPMemSizeBytes, - VPMemNoMultiMapping: osversion.Get().Build < osversion.V19H1, - PreferredRootFSType: PreferredRootFSTypeInitRd, - EnableColdDiscardHint: false, - VPCIEnabled: false, - EnableScratchEncryption: false, - SecurityPolicyEnabled: false, - SecurityPolicy: "", - GuestStateFile: "", - DisableTimeSyncService: false, - } - - if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil { - // We have a rootfs.vhd in the boot files path. 
Use it over an initrd.img - opts.RootFSFile = VhdFile - opts.PreferredRootFSType = PreferredRootFSTypeVHD - } - - if kernelDirectSupported { - // KernelDirect supports uncompressed kernel if the kernel is present. - // Default to uncompressed if on box. NOTE: If `kernel` is already - // uncompressed and simply named 'kernel' it will still be used - // uncompressed automatically. - if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil { - opts.KernelFile = UncompressedKernelFile - } - } - return opts -} - -// Get an acceptable number of processors given option and actual constraints. -func fetchProcessor(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (*hcsschema.Processor2, error) { - processorTopology, err := processorinfo.HostProcessorInfo(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get host processor information: %s", err) - } - - // To maintain compatibility with Docker we need to automatically downgrade - // a user CPU count if the setting is not possible. - uvm.processorCount = uvm.normalizeProcessorCount(ctx, opts.ProcessorCount, processorTopology) - - processor := &hcsschema.Processor2{ - Count: uvm.processorCount, - Limit: opts.ProcessorLimit, - Weight: opts.ProcessorWeight, - } - // We can set a cpu group for the VM at creation time in recent builds. - if opts.CPUGroupID != "" { - if osversion.Build() < osversion.V21H1 { - return nil, errCPUGroupCreateNotSupported - } - processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID} - } - return processor, nil -} - -/* -Example JSON document produced once the hcsschema.ComputeSytem returned by makeLCOWSecurityDoc is serialised: -{ - "Owner": "containerd-shim-runhcs-v1.exe", - "SchemaVersion": { - "Major": 2, - "Minor": 5 - }, - "ShouldTerminateOnLastHandleClosed": true, - "VirtualMachine": { - "Chipset": { - "Uefi": { - "ApplySecureBootTemplate": "Apply", - "SecureBootTemplateId": "1734c6e8-3154-4dda-ba5f-a874cc483422" - } - }, - "ComputeTopology": { - "Memory": { - "SizeInMB": 1024 - }, - "Processor": { - "Count": 2 - } - }, - "Devices": { - "Scsi" : { "0" : {} }, - "HvSocket": { - "HvSocketConfig": { - "DefaultBindSecurityDescriptor": "D:P(A;;FA;;;WD)", - "DefaultConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - "ServiceTable" : { - "00000808-facb-11e6-bd58-64006a7986d3" : { - "AllowWildcardBinds" : true, - "BindSecurityDescriptor": "D:P(A;;FA;;;WD)", - "ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)" - }, - "0000006d-facb-11e6-bd58-64006a7986d3" : { - "AllowWildcardBinds" : true, - "BindSecurityDescriptor": "D:P(A;;FA;;;WD)", - "ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)" - }, - "00000001-facb-11e6-bd58-64006a7986d3" : { - "AllowWildcardBinds" : true, - "BindSecurityDescriptor": "D:P(A;;FA;;;WD)", - "ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)" - }, - "40000000-facb-11e6-bd58-64006a7986d3" : { - "AllowWildcardBinds" : true, - "BindSecurityDescriptor": "D:P(A;;FA;;;WD)", - "ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)" - } - } - } - } - }, - "GuestState": { - "GuestStateFilePath": "d:\\ken\\aug27\\gcsinitnew.vmgs", - "GuestStateFileType": "FileMode", - "ForceTransientState": true - }, - "SecuritySettings": { - "Isolation": { - "IsolationType": "SecureNestedPaging", - "LaunchData": "kBifgKNijdHjxdSUshmavrNofo2B01LiIi1cr8R4ytI=" - } - }, - "Version": { - "Major": 254, - "Minor": 0 - } - } -} -*/ - -// A large part of difference between the SNP case and the usual kernel+option+initrd case is to do with booting -// 
from a VMGS file. The VMGS part may be used other than with SNP so is split out here. - -// Make a hcsschema.ComputeSytem with the parts that target booting from a VMGS file -func makeLCOWVMGSDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) { - // Kernel and initrd are combined into a single vmgs file. - vmgsFullPath := filepath.Join(opts.BootFilesPath, opts.GuestStateFile) - if _, err := os.Stat(vmgsFullPath); os.IsNotExist(err) { - return nil, fmt.Errorf("the GuestState vmgs file '%s' was not found", vmgsFullPath) - } - - var processor *hcsschema.Processor2 - processor, err = fetchProcessor(ctx, opts, uvm) - if err != nil { - return nil, err - } - - // Align the requested memory size. - memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) - - doc := &hcsschema.ComputeSystem{ - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV25(), - ShouldTerminateOnLastHandleClosed: true, - VirtualMachine: &hcsschema.VirtualMachine{ - StopOnReset: true, - Chipset: &hcsschema.Chipset{}, - ComputeTopology: &hcsschema.Topology{ - Memory: &hcsschema.Memory2{ - SizeInMB: memorySizeInMB, - AllowOvercommit: opts.AllowOvercommit, - EnableDeferredCommit: opts.EnableDeferredCommit, - EnableColdDiscardHint: opts.EnableColdDiscardHint, - LowMMIOGapInMB: opts.LowMMIOGapInMB, - HighMMIOBaseInMB: opts.HighMMIOBaseInMB, - HighMMIOGapInMB: opts.HighMMIOGapInMB, - }, - Processor: processor, - }, - Devices: &hcsschema.Devices{ - HvSocket: &hcsschema.HvSocket2{ - HvSocketConfig: &hcsschema.HvSocketSystemConfig{ - // Allow administrators and SYSTEM to bind to vsock sockets - // so that we can create a GCS log socket. - DefaultBindSecurityDescriptor: "D:P(A;;FA;;;WD)", // Differs for SNP - DefaultConnectSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - ServiceTable: make(map[string]hcsschema.HvSocketServiceConfig), - }, - }, - }, - }, - } - - // Set permissions for the VSock ports: - // entropyVsockPort - 1 is the entropy port, - // linuxLogVsockPort - 109 used by vsockexec to log stdout/stderr logging, - // 0x40000000 + 1 (LinuxGcsVsockPort + 1) is the bridge (see guestconnectiuon.go) - - hvSockets := [...]uint32{entropyVsockPort, linuxLogVsockPort, gcs.LinuxGcsVsockPort, gcs.LinuxGcsVsockPort + 1} - for _, whichSocket := range hvSockets { - key := fmt.Sprintf("%08x-facb-11e6-bd58-64006a7986d3", whichSocket) // format of a linux hvsock GUID is port#-facb-11e6-bd58-64006a7986d3 - doc.VirtualMachine.Devices.HvSocket.HvSocketConfig.ServiceTable[key] = hcsschema.HvSocketServiceConfig{ - AllowWildcardBinds: true, - BindSecurityDescriptor: "D:P(A;;FA;;;WD)", - ConnectSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - } - } - - // Handle StorageQoS if set - if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { - doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ - IopsMaximum: opts.StorageQoSIopsMaximum, - BandwidthMaximum: opts.StorageQoSBandwidthMaximum, - } - } - - if uvm.scsiControllerCount > 0 { - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - for i := 0; i < int(uvm.scsiControllerCount); i++ { - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ - Attachments: make(map[string]hcsschema.Attachment), - } - } - } - - // The rootfs must be provided as an initrd within the VMGS file. - // Raise an error if instructed to use a particular sort of rootfs. 
- - if opts.PreferredRootFSType != PreferredRootFSTypeNA { - return nil, fmt.Errorf("cannot override rootfs when using VMGS file") - } - - // Required by HCS for the isolated boot scheme, see also https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/learn-more/generation-2-virtual-machine-security-settings-for-hyper-v - // A complete explanation of the why's and wherefores of starting an encrypted, isolated VM are beond the scope of these comments. - - doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{ - ApplySecureBootTemplate: "Apply", - SecureBootTemplateId: "1734c6e8-3154-4dda-ba5f-a874cc483422", // aka MicrosoftWindowsSecureBootTemplateGUID equivalent to "Microsoft Windows" template from Get-VMHost | select SecureBootTemplates, - - } - - // Point at the file that contains the linux kernel and initrd images. - - doc.VirtualMachine.GuestState = &hcsschema.GuestState{ - GuestStateFilePath: vmgsFullPath, - GuestStateFileType: "FileMode", - ForceTransientState: true, // tell HCS that this is just the source of the images, not ongoing state - } - - return doc, nil -} - -// Programatically make the hcsschema.ComputeSystem document for the SNP case. -// This is done prior to json seriaisation and sending to the HCS layer to actually do the work of creating the VM. -// Many details are quite different (see the typical JSON examples), in particular it boots from a VMGS file -// which contains both the kernel and initrd as well as kernel boot options. -func makeLCOWSecurityDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) { - doc, vmgsErr := makeLCOWVMGSDoc(ctx, opts, uvm) - if vmgsErr != nil { - return nil, vmgsErr - } - - // Part of the protocol to ensure that the rules in the user's Security Policy are - // respected is to provide a hash of the policy to the hardware. This is immutable - // and can be used to check that the policy used by opengcs is the required one as - // a condition of releasing secrets to the container. - - policyDigest, err := securitypolicy.NewSecurityPolicyDigest(opts.SecurityPolicy) - if err != nil { - return nil, err - } - // HCS API expect a base64 encoded string as LaunchData. Internally it - // decodes it to bytes. SEV later returns the decoded byte blob as HostData - // field of the report. - hostData := base64.StdEncoding.EncodeToString(policyDigest) - - // Put the measurement into the LaunchData field of the HCS creation command. - // This will end-up in HOST_DATA of SNP_LAUNCH_FINISH command the and ATTESTATION_REPORT - // retrieved by the guest later. - doc.VirtualMachine.SecuritySettings = &hcsschema.SecuritySettings{ - EnableTpm: false, - Isolation: &hcsschema.IsolationSettings{ - IsolationType: "SecureNestedPaging", - LaunchData: hostData, - // HclEnabled: true, /* Not available in schema 2.5 - REQUIRED when using BlockStorage in 2.6 */ - }, - } - - return doc, nil -} - -/* -Example JSON document produced once the hcsschema.ComputeSytem returned by makeLCOWDoc is serialised. Note that the boot scheme is entirely different. 
-{ - "Owner": "containerd-shim-runhcs-v1.exe", - "SchemaVersion": { - "Major": 2, - "Minor": 1 - }, - "VirtualMachine": { - "StopOnReset": true, - "Chipset": { - "LinuxKernelDirect": { - "KernelFilePath": "C:\\ContainerPlat\\LinuxBootFiles\\vmlinux", - "InitRdPath": "C:\\ContainerPlat\\LinuxBootFiles\\initrd.img", - "KernelCmdLine": " 8250_core.nr_uarts=0 panic=-1 quiet pci=off nr_cpus=2 brd.rd_nr=0 pmtmr=0 -- -e 1 /bin/vsockexec -e 109 /bin/gcs -v4 -log-format json -loglevel debug" - } - }, - "ComputeTopology": { - "Memory": { - "SizeInMB": 1024, - "AllowOvercommit": true - }, - "Processor": { - "Count": 2 - } - }, - "Devices": { - "Scsi": { - "0": {} - }, - "HvSocket": { - "HvSocketConfig": { - "DefaultBindSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)" - } - }, - "Plan9": {} - } - }, - "ShouldTerminateOnLastHandleClosed": true -} -*/ - -// Make the ComputeSystem document object that will be serialised to json to be presented to the HCS api. -func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) { - logrus.Tracef("makeLCOWDoc %v\n", opts) - - kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile) - if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath) - } - rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile) - if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) { - return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath) - } - - var processor *hcsschema.Processor2 - processor, err = fetchProcessor(ctx, opts, uvm) // must happen after the file existence tests above. - if err != nil { - return nil, err - } - - // Align the requested memory size. - memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) - - doc := &hcsschema.ComputeSystem{ - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - VirtualMachine: &hcsschema.VirtualMachine{ - StopOnReset: true, - Chipset: &hcsschema.Chipset{}, - ComputeTopology: &hcsschema.Topology{ - Memory: &hcsschema.Memory2{ - SizeInMB: memorySizeInMB, - AllowOvercommit: opts.AllowOvercommit, - EnableDeferredCommit: opts.EnableDeferredCommit, - EnableColdDiscardHint: opts.EnableColdDiscardHint, - LowMMIOGapInMB: opts.LowMMIOGapInMB, - HighMMIOBaseInMB: opts.HighMMIOBaseInMB, - HighMMIOGapInMB: opts.HighMMIOGapInMB, - }, - Processor: processor, - }, - Devices: &hcsschema.Devices{ - HvSocket: &hcsschema.HvSocket2{ - HvSocketConfig: &hcsschema.HvSocketSystemConfig{ - // Allow administrators and SYSTEM to bind to vsock sockets - // so that we can create a GCS log socket. 
- DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - }, - }, - Plan9: &hcsschema.Plan9{}, - }, - }, - } - - // Handle StorageQoS if set - if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { - doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ - IopsMaximum: opts.StorageQoSIopsMaximum, - BandwidthMaximum: opts.StorageQoSBandwidthMaximum, - } - } - - if uvm.scsiControllerCount > 0 { - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - for i := 0; i < int(uvm.scsiControllerCount); i++ { - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ - Attachments: make(map[string]hcsschema.Attachment), - } - } - } - - if uvm.vpmemMaxCount > 0 { - doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ - MaximumCount: uvm.vpmemMaxCount, - MaximumSizeBytes: uvm.vpmemMaxSizeBytes, - } - } - - var kernelArgs string - switch opts.PreferredRootFSType { - case PreferredRootFSTypeInitRd: - if !opts.KernelDirect { - kernelArgs = "initrd=/" + opts.RootFSFile - } - case PreferredRootFSTypeVHD: - if uvm.vpmemMaxCount > 0 { - // Support for VPMem VHD(X) booting rather than initrd.. - kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" - imageFormat := "Vhd1" - if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { - imageFormat = "Vhdx" - } - doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ - "0": { - HostPath: rootfsFullPath, - ReadOnly: true, - ImageFormat: imageFormat, - }, - } - if uvm.vpmemMultiMapping { - pmem := newPackedVPMemDevice() - pmem.maxMappedDeviceCount = 1 - - st, err := os.Stat(rootfsFullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) - } - devSize := pageAlign(uint64(st.Size())) - memReg, err := pmem.Allocate(devSize) - if err != nil { - return nil, errors.Wrap(err, "failed to allocate memory for rootfs") - } - defer func() { - if err != nil { - if err = pmem.Release(memReg); err != nil { - log.G(ctx).WithError(err).Debug("failed to release memory region") - } - } - }() - - dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) - if err := pmem.mapVHDLayer(ctx, dev); err != nil { - return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") - } - uvm.vpmemDevicesMultiMapped[0] = pmem - } else { - dev := newDefaultVPMemInfo(opts.RootFSFile, "/") - uvm.vpmemDevicesDefault[0] = dev - } - } else { - kernelArgs = "root=/dev/sda ro rootwait init=/init" - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ - Type_: "VirtualDisk", - Path: rootfsFullPath, - ReadOnly: true, - } - uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false) - - } - } - - vmDebugging := false - if opts.ConsolePipe != "" { - vmDebugging = true - kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200" - doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{ - "0": { // Which is actually COM1 - NamedPipe: opts.ConsolePipe, - }, - } - } else { - kernelArgs += " 8250_core.nr_uarts=0" - } - - if opts.EnableGraphicsConsole { - vmDebugging = true - kernelArgs += " console=tty" - doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{} - doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{} - doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{} - } - - if !vmDebugging { - // Terminate the VM if 
there is a kernel panic. - kernelArgs += " panic=-1 quiet" - } - - // Add Kernel Boot options - if opts.KernelBootOptions != "" { - kernelArgs += " " + opts.KernelBootOptions - } - - if !opts.VPCIEnabled { - kernelArgs += ` pci=off` - } - - // Inject initial entropy over vsock during init launch. - entropyArgs := fmt.Sprintf("-e %d", entropyVsockPort) - - // With default options, run GCS with stderr pointing to the vsock port - // created below in order to forward guest logs to logrus. - execCmdArgs := "/bin/vsockexec" - - if opts.ForwardStdout { - execCmdArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort) - } - - if opts.ForwardStderr { - execCmdArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort) - } - - if opts.DisableTimeSyncService { - opts.ExecCommandLine = fmt.Sprintf("%s -disable-time-sync", opts.ExecCommandLine) - } - - if log.IsScrubbingEnabled() { - opts.ExecCommandLine += " -scrub-logs" - } - - execCmdArgs += " " + opts.ExecCommandLine - - if opts.ProcessDumpLocation != "" { - execCmdArgs += " -core-dump-location " + opts.ProcessDumpLocation - } - - initArgs := fmt.Sprintf("%s %s", entropyArgs, execCmdArgs) - if vmDebugging { - // Launch a shell on the console. - initArgs = entropyArgs + ` sh -c "` + execCmdArgs + ` & exec sh"` - } - - kernelArgs += fmt.Sprintf(" nr_cpus=%d", opts.ProcessorCount) - kernelArgs += ` brd.rd_nr=0 pmtmr=0 -- ` + initArgs - - if !opts.KernelDirect { - doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{ - BootThis: &hcsschema.UefiBootEntry{ - DevicePath: `\` + opts.KernelFile, - DeviceType: "VmbFs", - VmbFsRootPath: opts.BootFilesPath, - OptionalData: kernelArgs, - }, - } - } else { - doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{ - KernelFilePath: kernelFullPath, - KernelCmdLine: kernelArgs, - } - if opts.PreferredRootFSType == PreferredRootFSTypeInitRd { - doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath - } - } - return doc, nil -} - -// CreateLCOW creates an HCS compute system representing a utility VM. It -// consumes a set of options derived from various defaults and options -// expressed as annotations. -func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error) { - ctx, span := oc.StartSpan(ctx, "uvm::CreateLCOW") - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - if opts.ID == "" { - g, err := guid.NewV4() - if err != nil { - return nil, err - } - opts.ID = g.String() - } - - span.AddAttributes(trace.StringAttribute(logfields.UVMID, opts.ID)) - log.G(ctx).WithField("options", fmt.Sprintf("%+v", opts)).Debug("uvm::CreateLCOW options") - - // We dont serialize OutputHandler so if it is missing we need to put it back to the default. - if opts.OutputHandler == nil { - opts.OutputHandler = parseLogrus(opts.ID) - } - - uvm := &UtilityVM{ - id: opts.ID, - owner: opts.Owner, - operatingSystem: "linux", - scsiControllerCount: opts.SCSIControllerCount, - vpmemMaxCount: opts.VPMemDeviceCount, - vpmemMaxSizeBytes: opts.VPMemSizeBytes, - vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice), - physicallyBacked: !opts.AllowOvercommit, - devicesPhysicallyBacked: opts.FullyPhysicallyBacked, - createOpts: opts, - vpmemMultiMapping: !opts.VPMemNoMultiMapping, - encryptScratch: opts.EnableScratchEncryption, - noWritableFileShares: opts.NoWritableFileShares, - } - - defer func() { - if err != nil { - uvm.Close() - } - }() - - // vpmemMaxCount has been set to 0 which means we are going to need multiple SCSI controllers - // to support lots of layers. 
- if osversion.Build() >= osversion.RS5 && uvm.vpmemMaxCount == 0 { - uvm.scsiControllerCount = 4 - } - - if err = verifyOptions(ctx, opts); err != nil { - return nil, errors.Wrap(err, errBadUVMOpts.Error()) - } - - // HCS config for SNP isolated vm is quite different to the usual case - var doc *hcsschema.ComputeSystem - if opts.SecurityPolicyEnabled { - doc, err = makeLCOWSecurityDoc(ctx, opts, uvm) - log.G(ctx).Tracef("create_lcow::CreateLCOW makeLCOWSecurityDoc result doc: %v err %v", doc, err) - } else { - doc, err = makeLCOWDoc(ctx, opts, uvm) - log.G(ctx).Tracef("create_lcow::CreateLCOW makeLCOWDoc result doc: %v err %v", doc, err) - } - if err != nil { - return nil, err - } - - err = uvm.create(ctx, doc) - - log.G(ctx).Tracef("create_lcow::CreateLCOW uvm.create result uvm: %v err %v", uvm, err) - - if err != nil { - return nil, fmt.Errorf("error while creating the compute system: %s", err) - } - - // Cerate a socket to inject entropy during boot. - uvm.entropyListener, err = uvm.listenVsock(entropyVsockPort) - if err != nil { - return nil, err - } - - // Create a socket that the executed program can send to. This is usually - // used by GCS to send log data. - if opts.ForwardStdout || opts.ForwardStderr { - uvm.outputHandler = opts.OutputHandler - uvm.outputProcessingDone = make(chan struct{}) - uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort) - if err != nil { - return nil, err - } - } - - if opts.UseGuestConnection { - log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge") - l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort) - if err != nil { - return nil, err - } - uvm.gcListener = l - } - - uvm.ncProxyClientAddress = opts.NetworkConfigProxy - - return uvm, nil -} - -func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) { - return winio.ListenHvsock(&winio.HvsockAddr{ - VMID: uvm.runtimeID, - ServiceID: winio.VsockServiceID(port), - }) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go deleted file mode 100644 index 662fe69149..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go +++ /dev/null @@ -1,385 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" - "go.opencensus.io/trace" - - "github.com/Microsoft/hcsshim/internal/gcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/processorinfo" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/schemaversion" - "github.com/Microsoft/hcsshim/internal/uvmfolder" - "github.com/Microsoft/hcsshim/internal/wclayer" - "github.com/Microsoft/hcsshim/internal/wcow" - "github.com/Microsoft/hcsshim/osversion" -) - -// OptionsWCOW are the set of options passed to CreateWCOW() to create a utility vm. -type OptionsWCOW struct { - *Options - - LayerFolders []string // Set of folders for base layers and scratch. Ordered from top most read-only through base read-only layer, followed by scratch - - // IsTemplate specifies if this UVM will be saved as a template in future. 
Setting - // this option will also enable some VSMB Options during UVM creation that allow - // template creation. - IsTemplate bool - - // IsClone specifies if this UVM should be created by cloning a template. If - // IsClone is true then a valid UVMTemplateConfig struct must be passed in the - // `TemplateConfig` field. - IsClone bool - - // TemplateConfig is only used during clone creation. If a uvm is - // being cloned then this TemplateConfig struct must be passed - // which holds all the information about the template from - // which this clone should be created. - TemplateConfig *UVMTemplateConfig - - // NoDirectMap specifies that no direct mapping should be used for any VSMBs added to the UVM - NoDirectMap bool - - // NoInheritHostTimezone specifies whether to not inherit the hosts timezone for the UVM. UTC will be set as the default for the VM instead. - NoInheritHostTimezone bool -} - -// NewDefaultOptionsWCOW creates the default options for a bootable version of -// WCOW. The caller `MUST` set the `LayerFolders` path on the returned value. -// -// `id` the ID of the compute system. If not passed will generate a new GUID. -// -// `owner` the owner of the compute system. If not passed will use the -// executable files name. -func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW { - return &OptionsWCOW{ - Options: newDefaultOptions(id, owner), - } -} - -func (uvm *UtilityVM) startExternalGcsListener(ctx context.Context) error { - log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge") - - l, err := winio.ListenHvsock(&winio.HvsockAddr{ - VMID: uvm.runtimeID, - ServiceID: gcs.WindowsGcsHvsockServiceID, - }) - if err != nil { - return err - } - uvm.gcListener = l - return nil -} - -func prepareConfigDoc(ctx context.Context, uvm *UtilityVM, opts *OptionsWCOW, uvmFolder string) (*hcsschema.ComputeSystem, error) { - processorTopology, err := processorinfo.HostProcessorInfo(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get host processor information: %s", err) - } - - // To maintain compatibility with Docker we need to automatically downgrade - // a user CPU count if the setting is not possible. - uvm.processorCount = uvm.normalizeProcessorCount(ctx, opts.ProcessorCount, processorTopology) - - // Align the requested memory size. - memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) - - // UVM rootfs share is readonly. - vsmbOpts := uvm.DefaultVSMBOptions(true) - vsmbOpts.TakeBackupPrivilege = true - virtualSMB := &hcsschema.VirtualSmb{ - DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere - Shares: []hcsschema.VirtualSmbShare{ - { - Name: "os", - Path: filepath.Join(uvmFolder, `UtilityVM\Files`), - Options: vsmbOpts, - }, - }, - } - - var registryChanges hcsschema.RegistryChanges - // We're getting asked to setup local dump collection for WCOW. We need to: - // - // 1. Turn off WER reporting, so we don't both upload the dump and save a local copy. - // 2. Set WerSvc to start when the UVM starts to work around a bug when generating dumps for certain exceptions. - // https://github.com/microsoft/Windows-Containers/issues/60#issuecomment-834633192 - // This supposedly should be fixed soon but for now keep this until we know which container images - // (1809, 1903/9, 2004 etc.) this went out too. 
- if opts.ProcessDumpLocation != "" { - uvm.processDumpLocation = opts.ProcessDumpLocation - registryChanges.AddValues = append(registryChanges.AddValues, - hcsschema.RegistryValue{ - Key: &hcsschema.RegistryKey{ - Hive: "System", - Name: "ControlSet001\\Services\\WerSvc", - }, - Name: "Start", - DWordValue: 2, - Type_: "DWord", - }, - hcsschema.RegistryValue{ - Key: &hcsschema.RegistryKey{ - Hive: "Software", - Name: "Microsoft\\Windows\\Windows Error Reporting", - }, - Name: "Disabled", - DWordValue: 1, - Type_: "DWord", - }, - ) - } - - // Here for a temporary workaround until the need for setting this regkey is no more. To protect - // against any undesired behavior (such as some general networking scenarios ceasing to function) - // with a recent change to fix SMB share access in the UVM, this registry key will be checked to - // enable the change in question inside GNS.dll. - if !opts.DisableCompartmentNamespace { - registryChanges.AddValues = append(registryChanges.AddValues, - hcsschema.RegistryValue{ - Key: &hcsschema.RegistryKey{ - Hive: "System", - Name: "CurrentControlSet\\Services\\gns", - }, - Name: "EnableCompartmentNamespace", - DWordValue: 1, - Type_: "DWord", - }, - ) - } - - processor := &hcsschema.Processor2{ - Count: uvm.processorCount, - Limit: opts.ProcessorLimit, - Weight: opts.ProcessorWeight, - } - // We can set a cpu group for the VM at creation time in recent builds. - if opts.CPUGroupID != "" { - if osversion.Build() < osversion.V21H1 { - return nil, errCPUGroupCreateNotSupported - } - processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID} - } - - doc := &hcsschema.ComputeSystem{ - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - VirtualMachine: &hcsschema.VirtualMachine{ - StopOnReset: true, - Chipset: &hcsschema.Chipset{ - Uefi: &hcsschema.Uefi{ - BootThis: &hcsschema.UefiBootEntry{ - DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`, - DeviceType: "VmbFs", - }, - }, - }, - RegistryChanges: ®istryChanges, - ComputeTopology: &hcsschema.Topology{ - Memory: &hcsschema.Memory2{ - SizeInMB: memorySizeInMB, - AllowOvercommit: opts.AllowOvercommit, - // EnableHotHint is not compatible with physical. - EnableHotHint: opts.AllowOvercommit, - EnableDeferredCommit: opts.EnableDeferredCommit, - LowMMIOGapInMB: opts.LowMMIOGapInMB, - HighMMIOBaseInMB: opts.HighMMIOBaseInMB, - HighMMIOGapInMB: opts.HighMMIOGapInMB, - }, - Processor: processor, - }, - Devices: &hcsschema.Devices{ - HvSocket: &hcsschema.HvSocket2{ - HvSocketConfig: &hcsschema.HvSocketSystemConfig{ - // Allow administrators and SYSTEM to bind to vsock sockets - // so that we can create a GCS log socket. - DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - }, - }, - VirtualSmb: virtualSMB, - }, - }, - } - - // Handle StorageQoS if set - if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { - doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ - IopsMaximum: opts.StorageQoSIopsMaximum, - BandwidthMaximum: opts.StorageQoSBandwidthMaximum, - } - } - - return doc, nil -} - -// CreateWCOW creates an HCS compute system representing a utility VM. -// The HCS Compute system can either be created from scratch or can be cloned from a -// template. 
-// -// WCOW Notes: -// - The scratch is always attached to SCSI 0:0 -// -func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error) { - ctx, span := oc.StartSpan(ctx, "uvm::CreateWCOW") - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - if opts.ID == "" { - g, err := guid.NewV4() - if err != nil { - return nil, err - } - opts.ID = g.String() - } - - span.AddAttributes(trace.StringAttribute(logfields.UVMID, opts.ID)) - log.G(ctx).WithField("options", fmt.Sprintf("%+v", opts)).Debug("uvm::CreateWCOW options") - - uvm := &UtilityVM{ - id: opts.ID, - owner: opts.Owner, - operatingSystem: "windows", - scsiControllerCount: opts.SCSIControllerCount, - vsmbDirShares: make(map[string]*VSMBShare), - vsmbFileShares: make(map[string]*VSMBShare), - vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice), - noInheritHostTimezone: opts.NoInheritHostTimezone, - physicallyBacked: !opts.AllowOvercommit, - devicesPhysicallyBacked: opts.FullyPhysicallyBacked, - vsmbNoDirectMap: opts.NoDirectMap, - noWritableFileShares: opts.NoWritableFileShares, - createOpts: *opts, - } - - defer func() { - if err != nil { - uvm.Close() - } - }() - - if err := verifyOptions(ctx, opts); err != nil { - return nil, errors.Wrap(err, errBadUVMOpts.Error()) - } - - uvmFolder, err := uvmfolder.LocateUVMFolder(ctx, opts.LayerFolders) - if err != nil { - return nil, fmt.Errorf("failed to locate utility VM folder from layer folders: %s", err) - } - - // TODO: BUGBUG Remove this. @jhowardmsft - // It should be the responsibility of the caller to do the creation and population. - // - Update runhcs too (vm.go). - // - Remove comment in function header - // - Update tests that rely on this current behavior. - // Create the RW scratch in the top-most layer folder, creating the folder if it doesn't already exist. - scratchFolder := opts.LayerFolders[len(opts.LayerFolders)-1] - - // Create the directory if it doesn't exist - if _, err := os.Stat(scratchFolder); os.IsNotExist(err) { - if err := os.MkdirAll(scratchFolder, 0777); err != nil { - return nil, fmt.Errorf("failed to create utility VM scratch folder: %s", err) - } - } - - doc, err := prepareConfigDoc(ctx, uvm, opts, uvmFolder) - if err != nil { - return nil, fmt.Errorf("error in preparing config doc: %s", err) - } - - if !opts.IsClone { - // Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it - scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx") - if _, err := os.Stat(scratchPath); os.IsNotExist(err) { - if err := wcow.CreateUVMScratch(ctx, uvmFolder, scratchFolder, uvm.id); err != nil { - return nil, fmt.Errorf("failed to create scratch: %s", err) - } - } else { - // Sandbox.vhdx exists, just need to grant vm access to it. 
- if err := wclayer.GrantVmAccess(ctx, uvm.id, scratchPath); err != nil { - return nil, errors.Wrap(err, "failed to grant vm access to scratch") - } - } - - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - for i := 0; i < int(uvm.scsiControllerCount); i++ { - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ - Attachments: make(map[string]hcsschema.Attachment), - } - } - - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ - - Path: scratchPath, - Type_: "VirtualDisk", - } - - uvm.scsiLocations[0][0] = newSCSIMount(uvm, - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Path, - "", - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Type_, - "", - 1, - 0, - 0, - false, - false) - } else { - doc.VirtualMachine.RestoreState = &hcsschema.RestoreState{} - doc.VirtualMachine.RestoreState.TemplateSystemId = opts.TemplateConfig.UVMID - - for _, cloneableResource := range opts.TemplateConfig.Resources { - err = cloneableResource.Clone(ctx, uvm, &cloneData{ - doc: doc, - scratchFolder: scratchFolder, - uvmID: opts.ID, - }) - if err != nil { - return nil, fmt.Errorf("failed while cloning: %s", err) - } - } - - // we add default clone namespace for each clone. Include it here. - if uvm.namespaces == nil { - uvm.namespaces = make(map[string]*namespaceInfo) - } - uvm.namespaces[DefaultCloneNetworkNamespaceID] = &namespaceInfo{ - nics: make(map[string]*nicInfo), - } - uvm.IsClone = true - uvm.TemplateID = opts.TemplateConfig.UVMID - } - - // Add appropriate VSMB share options if this UVM needs to be saved as a template - if opts.IsTemplate { - for _, share := range doc.VirtualMachine.Devices.VirtualSmb.Shares { - uvm.SetSaveableVSMBOptions(share.Options, share.Options.ReadOnly) - } - uvm.IsTemplate = true - } - - err = uvm.create(ctx, doc) - if err != nil { - return nil, fmt.Errorf("error while creating the compute system: %s", err) - } - - if err = uvm.startExternalGcsListener(ctx); err != nil { - return nil, err - } - - uvm.ncProxyClientAddress = opts.NetworkConfigProxy - - return uvm, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/delete_container.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/delete_container.go deleted file mode 100644 index 46bef7b163..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/delete_container.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "errors" -) - -func (uvm *UtilityVM) DeleteContainerState(ctx context.Context, cid string) error { - if !uvm.DeleteContainerStateSupported() { - return errors.New("uvm guest connection does not support deleteContainerState") - } - - return uvm.gc.DeleteContainerState(ctx, cid) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/doc.go deleted file mode 100644 index c4e25cc15c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// This package describes the external interface for utility VMs. 
-package uvm diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/dumpstacks.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/dumpstacks.go deleted file mode 100644 index ed39fd282d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/dumpstacks.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" -) - -func (uvm *UtilityVM) DumpStacks(ctx context.Context) (string, error) { - if uvm.gc == nil || !uvm.guestCaps.DumpStacksSupported { - return "", nil - } - - return uvm.gc.DumpStacks(ctx) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/guest_request.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/guest_request.go deleted file mode 100644 index f097c4f33c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/guest_request.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// GuestRequest send an arbitrary guest request to the UVM. -func (uvm *UtilityVM) GuestRequest(ctx context.Context, guestReq interface{}) error { - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestReq, - } - return uvm.modify(ctx, msr) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/hvsocket.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/hvsocket.go deleted file mode 100644 index 4e439c7894..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/hvsocket.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" -) - -// UpdateHvSocketService calls HCS to update/create the hvsocket service for -// the UVM. Takes in a service ID and the hvsocket service configuration. If there is no -// entry for the service ID already it will be created. The same call on HvSockets side -// handles the Create/Update/Delete cases based on what is passed in. Here is the logic -// for the call. -// -// 1. If the service ID does not currently exist in the service table, it will be created -// with whatever descriptors and state was specified (disabled or not). -// 2. If the service already exists and empty descriptors and Disabled is passed in for the -// service config, the service will be removed. -// 3. Otherwise any combination that is not Disabled && Empty descriptors will just update the -// service. -// -// If the request is crafted with Disabled = True and empty descriptors, then this function -// will behave identically to a call to RemoveHvSocketService. Prefer RemoveHvSocketService for this -// behavior as the relevant fields are set on HCS' side. -func (uvm *UtilityVM) UpdateHvSocketService(ctx context.Context, sid string, doc *hcsschema.HvSocketServiceConfig) error { - request := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeUpdate, - ResourcePath: fmt.Sprintf(resourcepaths.HvSocketConfigResourceFormat, sid), - Settings: doc, - } - return uvm.modify(ctx, request) -} - -// RemoveHvSocketService will remove an hvsocket service entry if it exists. 
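A minimal usage sketch for the two hvsocket helpers above (not part of the deleted file): the helper name and the HvSocketServiceConfig field shown are assumptions about the schema2 package, and the security descriptor string mirrors the one used elsewhere in this diff.

    func exampleHvSocketService(ctx context.Context, vm *UtilityVM, sid string) error {
        cfg := &hcsschema.HvSocketServiceConfig{
            // BindSecurityDescriptor is an assumed field name; it restricts
            // which principals may bind to this hvsocket service.
            BindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
        }
        // Creates the service table entry for `sid`, or updates it if present.
        if err := vm.UpdateHvSocketService(ctx, sid, cfg); err != nil {
            return err
        }
        // Prefer RemoveHvSocketService over an Update with Disabled + empty descriptors.
        return vm.RemoveHvSocketService(ctx, sid)
    }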
-func (uvm *UtilityVM) RemoveHvSocketService(ctx context.Context, sid string) error { - request := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.HvSocketConfigResourceFormat, sid), - } - return uvm.modify(ctx, request) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/memory_update.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/memory_update.go deleted file mode 100644 index 59444f417e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/memory_update.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/memory" -) - -const bytesPerPage = 4096 - -// UpdateMemory makes a call to the VM's orchestrator to update the VM's size in MB -// Internally, HCS will get the number of pages this corresponds to and attempt to assign -// pages to numa nodes evenly -func (uvm *UtilityVM) UpdateMemory(ctx context.Context, sizeInBytes uint64) error { - requestedSizeInMB := sizeInBytes / memory.MiB - actual := uvm.normalizeMemorySize(ctx, requestedSizeInMB) - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.MemoryResourcePath, - Settings: actual, - } - return uvm.modify(ctx, req) -} - -// GetAssignedMemoryInBytes returns the amount of assigned memory for the UVM in bytes -func (uvm *UtilityVM) GetAssignedMemoryInBytes(ctx context.Context) (uint64, error) { - props, err := uvm.hcsSystem.PropertiesV2(ctx, hcsschema.PTMemory) - if err != nil { - return 0, err - } - if props.Memory == nil { - return 0, fmt.Errorf("no memory properties returned for system %s", uvm.id) - } - if props.Memory.VirtualMachineMemory == nil { - return 0, fmt.Errorf("no virtual memory properties returned for system %s", uvm.id) - } - pages := props.Memory.VirtualMachineMemory.AssignedMemory - if pages == 0 { - return 0, fmt.Errorf("assigned memory returned should not be 0 for system %s", uvm.id) - } - memInBytes := pages * bytesPerPage - return memInBytes, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go deleted file mode 100644 index 5a57dce9a6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" -) - -// Modify modifies the compute system by sending a request to HCS. 
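A rough sketch of the memory helpers defined a little earlier in this diff (UpdateMemory and GetAssignedMemoryInBytes); the helper name and the 2 GiB figure are illustrative only, assuming the internal memory package's MiB constant.

    func exampleResizeMemory(ctx context.Context, vm *UtilityVM) (uint64, error) {
        // Request 2 GiB; the size is normalized internally before the modify call.
        if err := vm.UpdateMemory(ctx, 2048*memory.MiB); err != nil {
            return 0, err
        }
        // Read back what HCS actually assigned, in bytes (pages * 4096).
        return vm.GetAssignedMemoryInBytes(ctx)
    }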
-func (uvm *UtilityVM) modify(ctx context.Context, doc *hcsschema.ModifySettingRequest) (err error) { - if doc.GuestRequest == nil || uvm.gc == nil { - return uvm.hcsSystem.Modify(ctx, doc) - } - - hostdoc := *doc - hostdoc.GuestRequest = nil - if doc.ResourcePath != "" && doc.RequestType == guestrequest.RequestTypeAdd { - err = uvm.hcsSystem.Modify(ctx, &hostdoc) - if err != nil { - return fmt.Errorf("adding VM resources: %s", err) - } - defer func() { - if err != nil { - hostdoc.RequestType = guestrequest.RequestTypeRemove - rerr := uvm.hcsSystem.Modify(ctx, &hostdoc) - if rerr != nil { - log.G(ctx).WithError(rerr).Error("failed to roll back resource add") - } - } - }() - } - err = uvm.gc.Modify(ctx, doc.GuestRequest) - if err != nil { - return fmt.Errorf("guest modify: %s", err) - } - if doc.ResourcePath != "" && doc.RequestType == guestrequest.RequestTypeRemove { - err = uvm.hcsSystem.Modify(ctx, &hostdoc) - if err != nil { - err = fmt.Errorf("removing VM resources: %s", err) - log.G(ctx).WithError(err).Error("failed to remove host resources after successful guest request") - return err - } - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go deleted file mode 100644 index 03509ad882..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go +++ /dev/null @@ -1,712 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "os" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/containerd/ttrpc" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/hcn" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/hns" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/osversion" -) - -var ( - // ErrNetNSAlreadyAttached is an error indicating the guest UVM already has - // an endpoint by this id. - ErrNetNSAlreadyAttached = errors.New("network namespace already added") - // ErrNetNSNotFound is an error indicating the guest UVM does not have a - // network namespace by this id. - ErrNetNSNotFound = errors.New("network namespace not found") - // ErrNICNotFound is an error indicating that the guest UVM does not have a NIC - // by this id. - ErrNICNotFound = errors.New("NIC not found in network namespace") -) - -// Network namespace setup is a bit different for templates and clones. -// For templates and clones we use a special network namespace ID. -// Details about this can be found in the Networking section of the late-clone wiki page. -// -// In this function we take the namespace ID of the namespace that was created for this -// UVM. We hot add the namespace (with the default ID if this is a template). We get the -// endpoints associated with this namespace and then hot add those endpoints (by changing -// their namespace IDs by the default IDs if it is a template). 
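A brief sketch of how a caller might drive the namespace setup described in the comment above; the helper name is hypothetical and `nsid` is whatever HCN namespace ID the caller created.

    func exampleAttachNamespace(ctx context.Context, vm *UtilityVM, nsid string) error {
        eps, err := GetNamespaceEndpoints(ctx, nsid)
        if err != nil {
            return err
        }
        log.G(ctx).WithField("endpoints", len(eps)).Debug("attaching namespace to UVM")
        // Hot adds the namespace (rewritten to the default clone ID for
        // templates/clones) and then each of its endpoints; best-effort cleanup
        // of the namespace happens inside the call on failure.
        return vm.SetupNetworkNamespace(ctx, nsid)
    }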
-func (uvm *UtilityVM) SetupNetworkNamespace(ctx context.Context, nsid string) error { - nsidInsideUVM := nsid - if uvm.IsTemplate || uvm.IsClone { - nsidInsideUVM = DefaultCloneNetworkNamespaceID - } - - // Query endpoints with actual nsid - endpoints, err := GetNamespaceEndpoints(ctx, nsid) - if err != nil { - return err - } - - // Add the network namespace inside the UVM if it is not a clone. (Clones will - // inherit the namespace from template) - if !uvm.IsClone { - // Get the namespace struct from the actual nsid. - hcnNamespace, err := hcn.GetNamespaceByID(nsid) - if err != nil { - return err - } - - // All templates should have a special NSID so that it - // will be easier to debug. Override it here. - if uvm.IsTemplate { - hcnNamespace.Id = nsidInsideUVM - } - - if err = uvm.AddNetNS(ctx, hcnNamespace); err != nil { - return err - } - } - - // If adding a network endpoint to clones or a template override nsid associated - // with it. - if uvm.IsClone || uvm.IsTemplate { - // replace nsid for each endpoint - for _, ep := range endpoints { - ep.Namespace = &hns.Namespace{ - ID: nsidInsideUVM, - } - } - } - - if err = uvm.AddEndpointsToNS(ctx, nsidInsideUVM, endpoints); err != nil { - // Best effort clean up the NS - if removeErr := uvm.RemoveNetNS(ctx, nsidInsideUVM); removeErr != nil { - log.G(ctx).Warn(removeErr) - } - return err - } - return nil -} - -// GetNamespaceEndpoints gets all endpoints in `netNS` -func GetNamespaceEndpoints(ctx context.Context, netNS string) ([]*hns.HNSEndpoint, error) { - op := "uvm::GetNamespaceEndpoints" - l := log.G(ctx).WithField("netns-id", netNS) - l.Debug(op + " - Begin") - defer func() { - l.Debug(op + " - End") - }() - - ids, err := hns.GetNamespaceEndpoints(netNS) - if err != nil { - return nil, err - } - var endpoints []*hns.HNSEndpoint - for _, id := range ids { - endpoint, err := hns.GetHNSEndpointByID(id) - if err != nil { - return nil, err - } - endpoints = append(endpoints, endpoint) - } - return endpoints, nil -} - -// NCProxyEnabled returns if there is a network configuration client. -func (uvm *UtilityVM) NCProxyEnabled() bool { - return uvm.ncProxyClientAddress != "" -} - -type ncproxyClient struct { - raw *ttrpc.Client - ncproxyttrpc.NetworkConfigProxyService -} - -func (n *ncproxyClient) Close() error { - return n.raw.Close() -} - -func (uvm *UtilityVM) GetNCProxyClient() (*ncproxyClient, error) { - conn, err := winio.DialPipe(uvm.ncProxyClientAddress, nil) - if err != nil { - return nil, errors.Wrap(err, "failed to connect to ncproxy service") - } - raw := ttrpc.NewClient(conn, ttrpc.WithOnClose(func() { conn.Close() })) - return &ncproxyClient{raw, ncproxyttrpc.NewNetworkConfigProxyClient(raw)}, nil -} - -// NetworkConfigType specifies the action to be performed during network configuration. -// For example: setup or teardown -type NetworkConfigType uint8 - -const ( - NetworkRequestSetup NetworkConfigType = iota - NetworkRequestTearDown -) - -var ErrNoNetworkSetup = errors.New("no network setup present for UVM") - -// CreateAndAssignNetworkSetup creates and assigns a new NetworkSetup interface to the Utility VM. -// This can be used to configure the networking (setup and teardown) of the vm. 
-// -// `addr` is an optional parameter -func (uvm *UtilityVM) CreateAndAssignNetworkSetup(ctx context.Context, addr, containerID string) (err error) { - if uvm.NCProxyEnabled() { - if addr == "" || containerID == "" { - return errors.New("received empty field(s) for external network setup") - } - setup, err := NewExternalNetworkSetup(ctx, uvm, addr, containerID) - if err != nil { - return err - } - uvm.networkSetup = setup - } else { - uvm.networkSetup = NewInternalNetworkSetup(uvm) - } - return nil -} - -// ConfigureNetworking configures the utility VMs networking setup using the namespace ID -// `nsid`. -func (uvm *UtilityVM) ConfigureNetworking(ctx context.Context, nsid string) error { - if uvm.networkSetup != nil { - return uvm.networkSetup.ConfigureNetworking(ctx, nsid, NetworkRequestSetup) - } - return ErrNoNetworkSetup -} - -// TearDownNetworking tears down the utility VMs networking setup using the namespace ID -// `nsid`. -func (uvm *UtilityVM) TearDownNetworking(ctx context.Context, nsid string) error { - if uvm.networkSetup != nil { - return uvm.networkSetup.ConfigureNetworking(ctx, nsid, NetworkRequestTearDown) - } - return ErrNoNetworkSetup -} - -// NetworkSetup is used to abstract away the details of setting up networking -// for a container. -type NetworkSetup interface { - ConfigureNetworking(ctx context.Context, namespaceID string, configType NetworkConfigType) error -} - -// LocalNetworkSetup implements the NetworkSetup interface for configuring container -// networking. -type internalNetworkSetup struct { - vm *UtilityVM -} - -func NewInternalNetworkSetup(vm *UtilityVM) NetworkSetup { - return &internalNetworkSetup{vm} -} - -func (i *internalNetworkSetup) ConfigureNetworking(ctx context.Context, namespaceID string, configType NetworkConfigType) error { - switch configType { - case NetworkRequestSetup: - if err := i.vm.SetupNetworkNamespace(ctx, namespaceID); err != nil { - return err - } - case NetworkRequestTearDown: - if err := i.vm.RemoveNetNS(ctx, namespaceID); err != nil { - return err - } - default: - return fmt.Errorf("network configuration type %d is not known", configType) - } - - return nil -} - -// ExternalNetworkSetup implements the NetworkSetup interface for configuring -// container networking. It will try and communicate with an external network configuration -// proxy service to setup networking. -type externalNetworkSetup struct { - vm *UtilityVM - caAddr string - containerID string -} - -// NewExternalNetworkSetup returns an object implementing the NetworkSetup interface to be -// used for external network configuration. 
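A hedged end-to-end sketch of the setup/teardown flow exposed by these methods; the helper name is hypothetical, and empty `addr`/`containerID` values are used to illustrate the internal (non-ncproxy) path.

    func exampleNetworkLifecycle(ctx context.Context, vm *UtilityVM, nsid string) error {
        // With no ncproxy address configured on the UVM, this selects the
        // internal NetworkSetup implementation.
        if err := vm.CreateAndAssignNetworkSetup(ctx, "", ""); err != nil {
            return err
        }
        if err := vm.ConfigureNetworking(ctx, nsid); err != nil {
            return err
        }
        // ... container runs ...
        return vm.TearDownNetworking(ctx, nsid)
    }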
-func NewExternalNetworkSetup(ctx context.Context, vm *UtilityVM, caAddr, containerID string) (NetworkSetup, error) { - if err := setupAndServe(ctx, caAddr, vm); err != nil { - return nil, err - } - - return &externalNetworkSetup{ - vm, - caAddr, - containerID, - }, nil -} - -func (e *externalNetworkSetup) ConfigureNetworking(ctx context.Context, namespaceID string, configType NetworkConfigType) error { - client, err := e.vm.GetNCProxyClient() - if err != nil { - return errors.Wrapf(err, "no ncproxy client for UVM %q", e.vm.ID()) - } - defer client.Close() - - netReq := &ncproxyttrpc.ConfigureNetworkingInternalRequest{ - ContainerID: e.containerID, - } - - switch configType { - case NetworkRequestSetup: - if err := e.vm.AddNetNSByID(ctx, namespaceID); err != nil { - return err - } - - registerReq := &ncproxyttrpc.RegisterComputeAgentRequest{ - ContainerID: e.containerID, - AgentAddress: e.caAddr, - } - if _, err := client.RegisterComputeAgent(ctx, registerReq); err != nil { - return err - } - - netReq.RequestType = ncproxyttrpc.RequestTypeInternal_Setup - if _, err := client.ConfigureNetworking(ctx, netReq); err != nil { - return err - } - case NetworkRequestTearDown: - netReq.RequestType = ncproxyttrpc.RequestTypeInternal_Teardown - if _, err := client.ConfigureNetworking(ctx, netReq); err != nil { - return err - } - // unregister compute agent with ncproxy - unregisterReq := &ncproxyttrpc.UnregisterComputeAgentRequest{ - ContainerID: e.containerID, - } - if _, err := client.UnregisterComputeAgent(ctx, unregisterReq); err != nil { - return err - } - default: - return fmt.Errorf("network configuration type %d is not known", configType) - } - - return nil -} - -// NetworkEndpoints is a struct containing all of the endpoint IDs of a network -// namespace. -type NetworkEndpoints struct { - EndpointIDs []string - // ID of the namespace the endpoints belong to - Namespace string -} - -// Release releases the resources for all of the network endpoints in a namespace. -func (endpoints *NetworkEndpoints) Release(ctx context.Context) error { - for _, endpoint := range endpoints.EndpointIDs { - err := hns.RemoveNamespaceEndpoint(endpoints.Namespace, endpoint) - if err != nil { - if !os.IsNotExist(err) { - return err - } - log.G(ctx).WithFields(logrus.Fields{ - "endpointID": endpoint, - "netID": endpoints.Namespace, - }).Warn("removing endpoint from namespace: does not exist") - } - } - endpoints.EndpointIDs = nil - err := hns.RemoveNamespace(endpoints.Namespace) - if err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// AddNetNS adds network namespace inside the guest without actually querying for the -// namespace by its ID. It uses the given namespace struct as it is in the guest request. -// This function is mostly used when we need to override the values inside the namespace -// struct returned by the GetNamespaceByID. For most uses cases AddNetNSByID is more appropriate. -// -// If a namespace with the same id already exists this returns `ErrNetNSAlreadyAttached`. -func (uvm *UtilityVM) AddNetNS(ctx context.Context, hcnNamespace *hcn.HostComputeNamespace) error { - uvm.m.Lock() - defer uvm.m.Unlock() - if _, ok := uvm.namespaces[hcnNamespace.Id]; ok { - return ErrNetNSAlreadyAttached - } - - if uvm.isNetworkNamespaceSupported() { - // Add a Guest Network namespace. On LCOW we add the adapters - // dynamically. 
- if uvm.operatingSystem == "windows" { - guestNamespace := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetworkNamespace, - RequestType: guestrequest.RequestTypeAdd, - Settings: hcnNamespace, - }, - } - if err := uvm.modify(ctx, &guestNamespace); err != nil { - return err - } - } - } - - if uvm.namespaces == nil { - uvm.namespaces = make(map[string]*namespaceInfo) - } - uvm.namespaces[hcnNamespace.Id] = &namespaceInfo{ - nics: make(map[string]*nicInfo), - } - return nil -} - -// AddNetNSByID adds finds the namespace with given `id` and adds that -// network namespace inside the guest. -// -// If a namespace with `id` already exists returns `ErrNetNSAlreadyAttached`. -func (uvm *UtilityVM) AddNetNSByID(ctx context.Context, id string) error { - hcnNamespace, err := hcn.GetNamespaceByID(id) - if err != nil { - return err - } - - if err = uvm.AddNetNS(ctx, hcnNamespace); err != nil { - return err - } - return nil -} - -// AddEndpointToNSWithID adds an endpoint to the network namespace with the specified -// NIC ID. If nicID is an empty string, a GUID will be generated for the ID instead. -// -// If no network namespace matches `id` returns `ErrNetNSNotFound`. -func (uvm *UtilityVM) AddEndpointToNSWithID(ctx context.Context, nsID, nicID string, endpoint *hns.HNSEndpoint) error { - uvm.m.Lock() - defer uvm.m.Unlock() - ns, ok := uvm.namespaces[nsID] - if !ok { - return ErrNetNSNotFound - } - if _, ok := ns.nics[endpoint.Id]; !ok { - if nicID == "" { - id, err := guid.NewV4() - if err != nil { - return err - } - nicID = id.String() - } - if err := uvm.addNIC(ctx, nicID, endpoint); err != nil { - return err - } - ns.nics[endpoint.Id] = &nicInfo{ - ID: nicID, - Endpoint: endpoint, - } - } - return nil -} - -// AddEndpointsToNS adds all unique `endpoints` to the network namespace -// matching `id`. On failure does not roll back any previously successfully -// added endpoints. -// -// If no network namespace matches `id` returns `ErrNetNSNotFound`. -func (uvm *UtilityVM) AddEndpointsToNS(ctx context.Context, id string, endpoints []*hns.HNSEndpoint) error { - uvm.m.Lock() - defer uvm.m.Unlock() - - ns, ok := uvm.namespaces[id] - if !ok { - return ErrNetNSNotFound - } - - for _, endpoint := range endpoints { - if _, ok := ns.nics[endpoint.Id]; !ok { - nicID, err := guid.NewV4() - if err != nil { - return err - } - if err := uvm.addNIC(ctx, nicID.String(), endpoint); err != nil { - return err - } - ns.nics[endpoint.Id] = &nicInfo{ - ID: nicID.String(), - Endpoint: endpoint, - } - } - } - return nil -} - -// RemoveNetNS removes the namespace from the uvm and all remaining endpoints in -// the namespace. -// -// If a namespace matching `id` is not found this command silently succeeds. 
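A small sketch tying together the namespace and endpoint helpers above; the helper name is hypothetical.

    func exampleEndpointLifecycle(ctx context.Context, vm *UtilityVM, nsid string) error {
        // Add the namespace first; tolerate it already being attached.
        if err := vm.AddNetNSByID(ctx, nsid); err != nil && err != ErrNetNSAlreadyAttached {
            return err
        }
        eps, err := GetNamespaceEndpoints(ctx, nsid)
        if err != nil {
            return err
        }
        if err := vm.AddEndpointsToNS(ctx, nsid, eps); err != nil {
            return err
        }
        // Removing the namespace also removes any NICs still attached to it.
        return vm.RemoveNetNS(ctx, nsid)
    }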
-func (uvm *UtilityVM) RemoveNetNS(ctx context.Context, id string) error { - uvm.m.Lock() - defer uvm.m.Unlock() - if ns, ok := uvm.namespaces[id]; ok { - for _, ninfo := range ns.nics { - if err := uvm.removeNIC(ctx, ninfo.ID, ninfo.Endpoint); err != nil { - return err - } - ns.nics[ninfo.Endpoint.Id] = nil - } - // Remove the Guest Network namespace - if uvm.isNetworkNamespaceSupported() { - if uvm.operatingSystem == "windows" { - hcnNamespace, err := hcn.GetNamespaceByID(id) - if err != nil { - return err - } - guestNamespace := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetworkNamespace, - RequestType: guestrequest.RequestTypeRemove, - Settings: hcnNamespace, - }, - } - if err := uvm.modify(ctx, &guestNamespace); err != nil { - return err - } - } - } - delete(uvm.namespaces, id) - } - return nil -} - -// RemoveEndpointsFromNS removes all matching `endpoints` in the network -// namespace matching `id`. If no endpoint matching `endpoint.Id` is found in -// the network namespace this command silently succeeds. -// -// If no network namespace matches `id` returns `ErrNetNSNotFound`. -func (uvm *UtilityVM) RemoveEndpointsFromNS(ctx context.Context, id string, endpoints []*hns.HNSEndpoint) error { - uvm.m.Lock() - defer uvm.m.Unlock() - - ns, ok := uvm.namespaces[id] - if !ok { - return ErrNetNSNotFound - } - - for _, endpoint := range endpoints { - if ninfo, ok := ns.nics[endpoint.Id]; ok && ninfo != nil { - if err := uvm.removeNIC(ctx, ninfo.ID, ninfo.Endpoint); err != nil { - return err - } - delete(ns.nics, endpoint.Id) - } - } - return nil -} - -// RemoveEndpointFromNS removes ``endpoint` in the network -// namespace matching `id`. If no endpoint matching `endpoint.Id` is found in -// the network namespace this command returns `ErrNICNotFound`. -// -// If no network namespace matches `id` this function returns `ErrNetNSNotFound`. -func (uvm *UtilityVM) RemoveEndpointFromNS(ctx context.Context, id string, endpoint *hns.HNSEndpoint) error { - uvm.m.Lock() - defer uvm.m.Unlock() - - ns, ok := uvm.namespaces[id] - if !ok { - return ErrNetNSNotFound - } - - if ninfo, ok := ns.nics[endpoint.Id]; ok && ninfo != nil { - if err := uvm.removeNIC(ctx, ninfo.ID, ninfo.Endpoint); err != nil { - return err - } - delete(ns.nics, endpoint.Id) - } else { - return ErrNICNotFound - } - return nil -} - -// IsNetworkNamespaceSupported returns bool value specifying if network namespace is supported inside the guest -func (uvm *UtilityVM) isNetworkNamespaceSupported() bool { - return uvm.guestCaps.NamespaceAddRequestSupported -} - -func getNetworkModifyRequest(adapterID string, requestType guestrequest.RequestType, settings interface{}) interface{} { - if osversion.Build() >= osversion.RS5 { - return guestrequest.NetworkModifyRequest{ - AdapterId: adapterID, - RequestType: requestType, - Settings: settings, - } - } - return guestrequest.RS4NetworkModifyRequest{ - AdapterInstanceId: adapterID, - RequestType: requestType, - Settings: settings, - } -} - -// addNIC adds a nic to the Utility VM. -func (uvm *UtilityVM) addNIC(ctx context.Context, id string, endpoint *hns.HNSEndpoint) error { - // First a pre-add. This is a guest-only request and is only done on Windows. 
- if uvm.operatingSystem == "windows" { - preAddRequest := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeAdd, - Settings: getNetworkModifyRequest( - id, - guestrequest.RequestTypePreAdd, - endpoint), - }, - } - if err := uvm.modify(ctx, &preAddRequest); err != nil { - return err - } - } - - // Then the Add itself - request := hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: hcsschema.NetworkAdapter{ - EndpointId: endpoint.Id, - MacAddress: endpoint.MacAddress, - }, - } - - if uvm.operatingSystem == "windows" { - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeAdd, - Settings: getNetworkModifyRequest( - id, - guestrequest.RequestTypeAdd, - nil), - } - } else { - // Verify this version of LCOW supports Network HotAdd - if uvm.isNetworkNamespaceSupported() { - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeAdd, - Settings: &guestresource.LCOWNetworkAdapter{ - NamespaceID: endpoint.Namespace.ID, - ID: id, - MacAddress: endpoint.MacAddress, - IPAddress: endpoint.IPAddress.String(), - PrefixLength: endpoint.PrefixLength, - GatewayAddress: endpoint.GatewayAddress, - DNSSuffix: endpoint.DNSSuffix, - DNSServerList: endpoint.DNSServerList, - EnableLowMetric: endpoint.EnableLowMetric, - EncapOverhead: endpoint.EncapOverhead, - }, - } - } - } - - if err := uvm.modify(ctx, &request); err != nil { - return err - } - - return nil -} - -func (uvm *UtilityVM) removeNIC(ctx context.Context, id string, endpoint *hns.HNSEndpoint) error { - request := hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: hcsschema.NetworkAdapter{ - EndpointId: endpoint.Id, - MacAddress: endpoint.MacAddress, - }, - } - - if uvm.operatingSystem == "windows" { - request.GuestRequest = hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - Settings: getNetworkModifyRequest( - id, - guestrequest.RequestTypeRemove, - nil), - } - } else { - // Verify this version of LCOW supports Network HotRemove - if uvm.isNetworkNamespaceSupported() { - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeRemove, - Settings: &guestresource.LCOWNetworkAdapter{ - NamespaceID: endpoint.Namespace.ID, - ID: endpoint.Id, - }, - } - } - } - - if err := uvm.modify(ctx, &request); err != nil { - return err - } - return nil -} - -// Removes all NICs added to this uvm. -func (uvm *UtilityVM) RemoveAllNICs(ctx context.Context) error { - for _, ns := range uvm.namespaces { - for _, ninfo := range ns.nics { - if err := uvm.removeNIC(ctx, ninfo.ID, ninfo.Endpoint); err != nil { - return err - } - } - } - return nil -} - -// UpdateNIC updates a UVM's network adapter. 
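A minimal sketch of calling UpdateNIC for a NIC that was previously added; the helper name is hypothetical, and the NetworkAdapter fields mirror the ones used by addNIC/removeNIC in this file.

    func exampleUpdateNIC(ctx context.Context, vm *UtilityVM, nicID string, ep *hns.HNSEndpoint) error {
        // Re-send the adapter settings, e.g. after the endpoint changed on the host.
        return vm.UpdateNIC(ctx, nicID, &hcsschema.NetworkAdapter{
            EndpointId: ep.Id,
            MacAddress: ep.MacAddress,
        })
    }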
-func (uvm *UtilityVM) UpdateNIC(ctx context.Context, id string, settings *hcsschema.NetworkAdapter) error { - req := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeUpdate, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: settings, - } - return uvm.modify(ctx, req) -} - -// AddNICInGuest makes a request to setup a network adapter's interface inside the lcow guest. -// This is primarily used for adding NICs in the guest that have been VPCI assigned. -func (uvm *UtilityVM) AddNICInGuest(ctx context.Context, cfg *guestresource.LCOWNetworkAdapter) error { - if !uvm.isNetworkNamespaceSupported() { - return fmt.Errorf("guest does not support network namespaces and cannot add VF NIC %+v", cfg) - } - request := hcsschema.ModifySettingRequest{} - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeAdd, - Settings: cfg, - } - - return uvm.modify(ctx, &request) -} - -// RemoveNICInGuest makes a request to remove a network interface inside the lcow guest. -// This is primarily used for removing NICs in the guest that were VPCI assigned. -func (uvm *UtilityVM) RemoveNICInGuest(ctx context.Context, cfg *guestresource.LCOWNetworkAdapter) error { - if !uvm.isNetworkNamespaceSupported() { - return fmt.Errorf("guest does not support network namespaces and cannot remove VF NIC %+v", cfg) - } - request := hcsschema.ModifySettingRequest{} - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeNetwork, - RequestType: guestrequest.RequestTypeRemove, - Settings: cfg, - } - - return uvm.modify(ctx, &request) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/pipes.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/pipes.go deleted file mode 100644 index cc67f9a798..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/pipes.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "strings" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const pipePrefix = `\\.\pipe\` - -// PipeMount contains the host path for pipe mount -type PipeMount struct { - // UVM the resource belongs to - vm *UtilityVM - HostPath string -} - -// Release frees the resources of the corresponding pipe Mount -func (pipe *PipeMount) Release(ctx context.Context) error { - if err := pipe.vm.RemovePipe(ctx, pipe.HostPath); err != nil { - return fmt.Errorf("failed to remove pipe mount: %s", err) - } - return nil -} - -// AddPipe shares a named pipe into the UVM. -func (uvm *UtilityVM) AddPipe(ctx context.Context, hostPath string) (*PipeMount, error) { - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), - } - if err := uvm.modify(ctx, modification); err != nil { - return nil, err - } - return &PipeMount{uvm, hostPath}, nil -} - -// RemovePipe removes a shared named pipe from the UVM. 
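A short sketch of the pipe-share helpers in this file; the helper name and the pipe path are hypothetical.

    func examplePipeShare(ctx context.Context, vm *UtilityVM) error {
        const hostPipe = `\\.\pipe\example-agent` // hypothetical pipe name
        if !IsPipe(hostPipe) {
            return fmt.Errorf("%s is not a named pipe", hostPipe)
        }
        pipe, err := vm.AddPipe(ctx, hostPipe)
        if err != nil {
            return err
        }
        // Release issues the corresponding RemovePipe request against the UVM.
        return pipe.Release(ctx)
    }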
-func (uvm *UtilityVM) RemovePipe(ctx context.Context, hostPath string) error { - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), - } - if err := uvm.modify(ctx, modification); err != nil { - return err - } - return nil -} - -// IsPipe returns true if the given path references a named pipe. -func IsPipe(hostPath string) bool { - return strings.HasPrefix(hostPath, pipePrefix) -} - -// GetContainerPipeMapping returns the source and destination to use for a given -// pipe mount in a container. -func GetContainerPipeMapping(uvm *UtilityVM, mount specs.Mount) (src string, dst string) { - if uvm == nil { - src = mount.Source - } else { - src = vsmbSharePrefix + `IPC$\` + strings.TrimPrefix(mount.Source, pipePrefix) - } - dst = strings.TrimPrefix(mount.Destination, pipePrefix) - return src, dst -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go deleted file mode 100644 index d8fce975f8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go +++ /dev/null @@ -1,140 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "errors" - "fmt" - "strconv" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/osversion" -) - -// Plan9Share is a struct containing host paths for the UVM -type Plan9Share struct { - // UVM resource belongs to - vm *UtilityVM - name, uvmPath string -} - -// Release frees the resources of the corresponding Plan9 share -func (p9 *Plan9Share) Release(ctx context.Context) error { - if err := p9.vm.RemovePlan9(ctx, p9); err != nil { - return fmt.Errorf("failed to remove plan9 share: %s", err) - } - return nil -} - -const plan9Port = 564 - -// AddPlan9 adds a Plan9 share to a utility VM. -func (uvm *UtilityVM) AddPlan9(ctx context.Context, hostPath string, uvmPath string, readOnly bool, restrict bool, allowedNames []string) (*Plan9Share, error) { - if uvm.operatingSystem != "linux" { - return nil, errNotSupported - } - if restrict && osversion.Build() < osversion.V19H1 { - return nil, errors.New("single-file mappings are not supported on this build of Windows") - } - if uvmPath == "" { - return nil, fmt.Errorf("uvmPath must be passed to AddPlan9") - } - if !readOnly && uvm.NoWritableFileShares() { - return nil, fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) - } - - // TODO: JTERRY75 - These are marked private in the schema. For now use them - // but when there are public variants we need to switch to them. - const ( - shareFlagsReadOnly int32 = 0x00000001 - shareFlagsLinuxMetadata int32 = 0x00000004 - shareFlagsCaseSensitive int32 = 0x00000008 - shareFlagsRestrictFileAccess int32 = 0x00000080 - ) - - // TODO: JTERRY75 - `shareFlagsCaseSensitive` only works if the Windows - // `hostPath` supports case sensitivity. We need to detect this case before - // forwarding this flag in all cases. 
- flags := shareFlagsLinuxMetadata // | shareFlagsCaseSensitive - if readOnly { - flags |= shareFlagsReadOnly - } - if restrict { - flags |= shareFlagsRestrictFileAccess - } - - uvm.m.Lock() - index := uvm.plan9Counter - uvm.plan9Counter++ - uvm.m.Unlock() - name := strconv.FormatUint(index, 10) - - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - Settings: hcsschema.Plan9Share{ - Name: name, - AccessName: name, - Path: hostPath, - Port: plan9Port, - Flags: flags, - AllowedFiles: allowedNames, - }, - ResourcePath: resourcepaths.Plan9ShareResourcePath, - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedDirectory, - RequestType: guestrequest.RequestTypeAdd, - Settings: guestresource.LCOWMappedDirectory{ - MountPath: uvmPath, - ShareName: name, - Port: plan9Port, - ReadOnly: readOnly, - }, - }, - } - - if err := uvm.modify(ctx, modification); err != nil { - return nil, err - } - - return &Plan9Share{ - vm: uvm, - name: name, - uvmPath: uvmPath, - }, nil -} - -// RemovePlan9 removes a Plan9 share from a utility VM. Each Plan9 share is ref-counted -// and only actually removed when the ref-count drops to zero. -func (uvm *UtilityVM) RemovePlan9(ctx context.Context, share *Plan9Share) error { - if uvm.operatingSystem != "linux" { - return errNotSupported - } - - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - Settings: hcsschema.Plan9Share{ - Name: share.name, - AccessName: share.name, - Port: plan9Port, - }, - ResourcePath: resourcepaths.Plan9ShareResourcePath, - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedDirectory, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.LCOWMappedDirectory{ - MountPath: share.uvmPath, - ShareName: share.name, - Port: plan9Port, - }, - }, - } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", share.name, uvm.id, modification, err) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go deleted file mode 100644 index e21d0ddfe2..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go +++ /dev/null @@ -1,732 +0,0 @@ -//go:build windows - -package uvm - -import ( - "bytes" - "context" - "encoding/gob" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/copyfile" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/internal/security" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -// VMAccessType is used to determine the various types of access we can -// grant for a given file. -type VMAccessType int - -const ( - // `VMAccessTypeNoop` indicates no additional access should be given. Note - // this should be used for layers and gpu vhd where we have given VM group - // access outside of the shim (containerd for layers, package installation - // for gpu vhd). 
- VMAccessTypeNoop VMAccessType = iota - // `VMAccessTypeGroup` indicates we should give access to a file for the VM group sid - VMAccessTypeGroup - // `VMAccessTypeIndividual` indicates we should give additional access to a file for - // the running VM only - VMAccessTypeIndividual -) - -const scsiCurrentSerialVersionID = 2 - -var ( - ErrNoAvailableLocation = fmt.Errorf("no available location") - ErrNotAttached = fmt.Errorf("not attached") - ErrAlreadyAttached = fmt.Errorf("already attached") - ErrNoSCSIControllers = fmt.Errorf("no SCSI controllers configured for this utility VM") - ErrTooManyAttachments = fmt.Errorf("too many SCSI attachments") - ErrSCSILayerWCOWUnsupported = fmt.Errorf("SCSI attached layers are not supported for WCOW") -) - -// Release frees the resources of the corresponding Scsi Mount -func (sm *SCSIMount) Release(ctx context.Context) error { - if err := sm.vm.RemoveSCSI(ctx, sm.HostPath); err != nil { - return fmt.Errorf("failed to remove SCSI device: %s", err) - } - return nil -} - -// SCSIMount struct representing a SCSI mount point and the UVM -// it belongs to. -type SCSIMount struct { - // Utility VM the scsi mount belongs to - vm *UtilityVM - // path is the host path to the vhd that is mounted. - HostPath string - // path for the uvm - UVMPath string - // scsi controller - Controller int - // scsi logical unit number - LUN int32 - // While most VHDs attached to SCSI are scratch spaces, in the case of LCOW - // when the size is over the size possible to attach to PMEM, we use SCSI for - // read-only layers. As RO layers are shared, we perform ref-counting. - isLayer bool - refCount uint32 - // specifies if this is an encrypted VHD - encrypted bool - // specifies if this is a readonly layer - readOnly bool - // "VirtualDisk" or "PassThru" or "ExtensibleVirtualDisk" disk attachment type. - attachmentType string - // If attachmentType is "ExtensibleVirtualDisk" then extensibleVirtualDiskType should - // specify the type of it (for e.g "space" for storage spaces). Otherwise this should be - // empty. - extensibleVirtualDiskType string - // serialization ID - serialVersionID uint32 - // Make sure that serialVersionID is always the last field and its value is - // incremented every time this structure is updated -} - -// addSCSIRequest is an internal struct used to hold all the parameters that are sent to -// the addSCSIActual method. -type addSCSIRequest struct { - // host path to the disk that should be added as a SCSI disk. - hostPath string - // the path inside the uvm at which this disk should show up. Can be empty. - uvmPath string - // attachmentType is required and `must` be `VirtualDisk` for vhd/vhdx - // attachments, `PassThru` for physical disk and `ExtensibleVirtualDisk` for - // Extensible virtual disks. - attachmentType string - // indicates if the VHD is encrypted - encrypted bool - // indicates if the attachment should be added read only. - readOnly bool - // guestOptions is a slice that contains optional information to pass to the guest - // service. - guestOptions []string - // indicates what access to grant the vm for the hostpath. Only required for - // `VirtualDisk` and `PassThru` disk types. - vmAccess VMAccessType - // `evdType` indicates the type of the extensible virtual disk if `attachmentType` - // is "ExtensibleVirtualDisk" should be empty otherwise. - evdType string -} - -// RefCount returns the current refcount for the SCSI mount. 
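A hedged sketch of attaching and releasing a SCSI-backed VHD with the AddSCSI helper defined further down in this file; the helper name and the guest mount path are illustrative only.

    func exampleScratchAttach(ctx context.Context, vm *UtilityVM, vhdPath string) error {
        // Attach read/write, unencrypted, with no extra guest options.
        sm, err := vm.AddSCSI(ctx, vhdPath, "/run/mounts/scratch", false, false, nil, VMAccessTypeIndividual)
        if err != nil {
            return err
        }
        // Mounts are ref-counted; the disk is only ejected once the count drops to zero.
        return sm.Release(ctx)
    }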
-func (sm *SCSIMount) RefCount() uint32 { - return sm.refCount -} - -func (sm *SCSIMount) logFormat() logrus.Fields { - return logrus.Fields{ - "HostPath": sm.HostPath, - "UVMPath": sm.UVMPath, - "isLayer": sm.isLayer, - "refCount": sm.refCount, - "Controller": sm.Controller, - "LUN": sm.LUN, - "ExtensibleVirtualDiskType": sm.extensibleVirtualDiskType, - "SerialVersionID": sm.serialVersionID, - } -} - -func newSCSIMount( - uvm *UtilityVM, - hostPath string, - uvmPath string, - attachmentType string, - evdType string, - refCount uint32, - controller int, - lun int32, - readOnly bool, - encrypted bool, -) *SCSIMount { - return &SCSIMount{ - vm: uvm, - HostPath: hostPath, - UVMPath: uvmPath, - refCount: refCount, - Controller: controller, - LUN: int32(lun), - encrypted: encrypted, - readOnly: readOnly, - attachmentType: attachmentType, - extensibleVirtualDiskType: evdType, - serialVersionID: scsiCurrentSerialVersionID, - } -} - -// allocateSCSISlot finds the next available slot on the -// SCSI controllers associated with a utility VM to use. -// Lock must be held when calling this function -func (uvm *UtilityVM) allocateSCSISlot(ctx context.Context) (int, int, error) { - for controller := 0; controller < int(uvm.scsiControllerCount); controller++ { - for lun, sm := range uvm.scsiLocations[controller] { - // If sm is nil, we have found an open slot so we allocate a new SCSIMount - if sm == nil { - return controller, lun, nil - } - } - } - return -1, -1, ErrNoAvailableLocation -} - -func (uvm *UtilityVM) deallocateSCSIMount(ctx context.Context, sm *SCSIMount) { - uvm.m.Lock() - defer uvm.m.Unlock() - if sm != nil { - log.G(ctx).WithFields(sm.logFormat()).Debug("removed SCSI location") - uvm.scsiLocations[sm.Controller][sm.LUN] = nil - } -} - -// Lock must be held when calling this function. -func (uvm *UtilityVM) findSCSIAttachment(ctx context.Context, findThisHostPath string) (*SCSIMount, error) { - for _, luns := range uvm.scsiLocations { - for _, sm := range luns { - if sm != nil && sm.HostPath == findThisHostPath { - log.G(ctx).WithFields(sm.logFormat()).Debug("found SCSI location") - return sm, nil - } - } - } - return nil, ErrNotAttached -} - -// RemoveSCSI removes a SCSI disk from a utility VM. -func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { - uvm.m.Lock() - defer uvm.m.Unlock() - - if uvm.scsiControllerCount == 0 { - return ErrNoSCSIControllers - } - - // Make sure it is actually attached - sm, err := uvm.findSCSIAttachment(ctx, hostPath) - if err != nil { - return err - } - - sm.refCount-- - if sm.refCount > 0 { - return nil - } - - scsiModification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), - } - - var verity *guestresource.DeviceVerityInfo - if v, iErr := readVeritySuperBlock(ctx, hostPath); iErr != nil { - log.G(ctx).WithError(iErr).WithField("hostPath", sm.HostPath).Debug("unable to read dm-verity information from VHD") - } else { - if v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": hostPath, - "rootDigest": v.RootDigest, - }).Debug("removing SCSI with dm-verity") - } - verity = v - } - - // Include the GuestRequest so that the GCS ejects the disk cleanly if the - // disk was attached/mounted - // - // Note: We always send a guest eject even if there is no UVM path in lcow - // so that we synchronize the guest state. 
This seems to always avoid SCSI - // related errors if this index quickly reused by another container. - if uvm.operatingSystem == "windows" && sm.UVMPath != "" { - scsiModification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.WCOWMappedVirtualDisk{ - ContainerPath: sm.UVMPath, - Lun: sm.LUN, - }, - } - } else { - scsiModification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.LCOWMappedVirtualDisk{ - MountPath: sm.UVMPath, // May be blank in attach-only - Lun: uint8(sm.LUN), - Controller: uint8(sm.Controller), - VerityInfo: verity, - }, - } - } - - if err := uvm.modify(ctx, scsiModification); err != nil { - return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err) - } - log.G(ctx).WithFields(sm.logFormat()).Debug("removed SCSI location") - uvm.scsiLocations[sm.Controller][sm.LUN] = nil - return nil -} - -// AddSCSI adds a SCSI disk to a utility VM at the next available location. This -// function should be called for adding a scratch layer, a read-only layer as an -// alternative to VPMEM, or for other VHD mounts. -// -// `hostPath` is required and must point to a vhd/vhdx path. -// -// `uvmPath` is optional. If not provided, no guest request will be made -// -// `readOnly` set to `true` if the vhd/vhdx should be attached read only. -// -// `encrypted` set to `true` if the vhd/vhdx should be attached in encrypted mode. -// The device will be formatted, so this option must be used only when creating -// scratch vhd/vhdx. -// -// `guestOptions` is a slice that contains optional information to pass -// to the guest service -// -// `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSI( - ctx context.Context, - hostPath string, - uvmPath string, - readOnly bool, - encrypted bool, - guestOptions []string, - vmAccess VMAccessType, -) (*SCSIMount, error) { - addReq := &addSCSIRequest{ - hostPath: hostPath, - uvmPath: uvmPath, - attachmentType: "VirtualDisk", - readOnly: readOnly, - encrypted: encrypted, - guestOptions: guestOptions, - vmAccess: vmAccess, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// AddSCSIPhysicalDisk attaches a physical disk from the host directly to the -// Utility VM at the next available location. -// -// `hostPath` is required and `likely` start's with `\\.\PHYSICALDRIVE`. -// -// `uvmPath` is optional if a guest mount is not requested. -// -// `readOnly` set to `true` if the physical disk should be attached read only. -// -// `guestOptions` is a slice that contains optional information to pass -// to the guest service -func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool, guestOptions []string) (*SCSIMount, error) { - addReq := &addSCSIRequest{ - hostPath: hostPath, - uvmPath: uvmPath, - attachmentType: "PassThru", - readOnly: readOnly, - guestOptions: guestOptions, - vmAccess: VMAccessTypeIndividual, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// AddSCSIExtensibleVirtualDisk adds an extensible virtual disk as a SCSI mount -// to the utility VM at the next available location. All such disks which are not actual virtual disks -// but provide the same SCSI interface are added to the UVM as Extensible Virtual disks. -// -// `hostPath` is required. 
Depending on the type of the extensible virtual disk the format of `hostPath` can -// be different. -// For example, in case of storage spaces the host path must be in the -// `evd://space/{storage_pool_unique_ID}{virtual_disk_unique_ID}` format. -// -// `uvmPath` must be provided in order to be able to use this disk in a container. -// -// `readOnly` set to `true` if the virtual disk should be attached read only. -// -// `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSIExtensibleVirtualDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool) (*SCSIMount, error) { - if uvmPath == "" { - return nil, errors.New("uvmPath can not be empty for extensible virtual disk") - } - evdType, mountPath, err := ParseExtensibleVirtualDiskPath(hostPath) - if err != nil { - return nil, err - } - addReq := &addSCSIRequest{ - hostPath: mountPath, - uvmPath: uvmPath, - attachmentType: "ExtensibleVirtualDisk", - readOnly: readOnly, - guestOptions: []string{}, - vmAccess: VMAccessTypeIndividual, - evdType: evdType, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// addSCSIActual is the implementation behind the external functions AddSCSI, -// AddSCSIPhysicalDisk, AddSCSIExtensibleVirtualDisk. -// -// We are in control of everything ourselves. Hence we have ref- counting and -// so-on tracking what SCSI locations are available or used. -// -// Returns result from calling modify with the given scsi mount -func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) (sm *SCSIMount, err error) { - sm, existed, err := uvm.allocateSCSIMount( - ctx, - addReq.readOnly, - addReq.encrypted, - addReq.hostPath, - addReq.uvmPath, - addReq.attachmentType, - addReq.evdType, - addReq.vmAccess, - ) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - uvm.deallocateSCSIMount(ctx, sm) - } - }() - - if existed { - return sm, nil - } - - if uvm.scsiControllerCount == 0 { - return nil, ErrNoSCSIControllers - } - - SCSIModification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - Settings: hcsschema.Attachment{ - Path: sm.HostPath, - Type_: addReq.attachmentType, - ReadOnly: addReq.readOnly, - ExtensibleVirtualDiskType: addReq.evdType, - }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), - } - - if sm.UVMPath != "" { - guestReq := guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeAdd, - } - - if uvm.operatingSystem == "windows" { - guestReq.Settings = guestresource.WCOWMappedVirtualDisk{ - ContainerPath: sm.UVMPath, - Lun: sm.LUN, - } - } else { - var verity *guestresource.DeviceVerityInfo - if v, iErr := readVeritySuperBlock(ctx, sm.HostPath); iErr != nil { - log.G(ctx).WithError(iErr).WithField("hostPath", sm.HostPath).Debug("unable to read dm-verity information from VHD") - } else { - if v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": sm.HostPath, - "rootDigest": v.RootDigest, - }).Debug("adding SCSI with dm-verity") - } - verity = v - } - - guestReq.Settings = guestresource.LCOWMappedVirtualDisk{ - MountPath: sm.UVMPath, - Lun: uint8(sm.LUN), - Controller: uint8(sm.Controller), - ReadOnly: addReq.readOnly, - Encrypted: addReq.encrypted, - Options: addReq.guestOptions, - VerityInfo: verity, - } - } - SCSIModification.GuestRequest = guestReq - } - - if err := uvm.modify(ctx, SCSIModification); err != nil { - return nil, 
fmt.Errorf("failed to modify UVM with new SCSI mount: %s", err) - } - return sm, nil -} - -// allocateSCSIMount grants vm access to hostpath and increments the ref count of an existing scsi -// device or allocates a new one if not already present. -// Returns the resulting *SCSIMount, a bool indicating if the scsi device was already present, -// and error if any. -func (uvm *UtilityVM) allocateSCSIMount( - ctx context.Context, - readOnly bool, - encrypted bool, - hostPath string, - uvmPath string, - attachmentType string, - evdType string, - vmAccess VMAccessType, -) (*SCSIMount, bool, error) { - if attachmentType != "ExtensibleVirtualDisk" { - // Ensure the utility VM has access - err := grantAccess(ctx, uvm.id, hostPath, vmAccess) - if err != nil { - return nil, false, errors.Wrapf(err, "failed to grant VM access for SCSI mount") - } - } - // We must hold the lock throughout the lookup (findSCSIAttachment) until - // after the possible allocation (allocateSCSISlot) has been completed to ensure - // there isn't a race condition for it being attached by another thread between - // these two operations. - uvm.m.Lock() - defer uvm.m.Unlock() - if sm, err := uvm.findSCSIAttachment(ctx, hostPath); err == nil { - sm.refCount++ - return sm, true, nil - } - - controller, lun, err := uvm.allocateSCSISlot(ctx) - if err != nil { - return nil, false, err - } - - uvm.scsiLocations[controller][lun] = newSCSIMount( - uvm, - hostPath, - uvmPath, - attachmentType, - evdType, - 1, - controller, - int32(lun), - readOnly, - encrypted, - ) - - log.G(ctx).WithFields(uvm.scsiLocations[controller][lun].logFormat()).Debug("allocated SCSI mount") - - return uvm.scsiLocations[controller][lun], false, nil -} - -// GetScsiUvmPath returns the guest mounted path of a SCSI drive. -// -// If `hostPath` is not mounted returns `ErrNotAttached`. -func (uvm *UtilityVM) GetScsiUvmPath(ctx context.Context, hostPath string) (string, error) { - uvm.m.Lock() - defer uvm.m.Unlock() - sm, err := uvm.findSCSIAttachment(ctx, hostPath) - if err != nil { - return "", err - } - return sm.UVMPath, err -} - -// ScratchEncryptionEnabled is a getter for `uvm.encryptScratch`. -// -// Returns true if the scratch disks should be encrypted, false otherwise. -func (uvm *UtilityVM) ScratchEncryptionEnabled() bool { - return uvm.encryptScratch -} - -// grantAccess helper function to grant access to a file for the vm or vm group -func grantAccess(ctx context.Context, uvmID string, hostPath string, vmAccess VMAccessType) error { - switch vmAccess { - case VMAccessTypeGroup: - log.G(ctx).WithField("path", hostPath).Debug("granting vm group access") - return security.GrantVmGroupAccess(hostPath) - case VMAccessTypeIndividual: - return wclayer.GrantVmAccess(ctx, uvmID, hostPath) - } - return nil -} - -var _ = (Cloneable)(&SCSIMount{}) - -// GobEncode serializes the SCSIMount struct -func (sm *SCSIMount) GobEncode() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - errMsgFmt := "failed to encode SCSIMount: %s" - // encode only the fields that can be safely deserialized. 
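The GobEncode/GobDecode pair that follows serializes the mount with a version number first, so a decoder can reject payloads produced by a different layout of the struct (the scsiCurrentSerialVersionID check). Here is a simplified, standalone sketch of that version-tagged gob round trip; it encodes the whole struct at once rather than field by field, and all names are made up for illustration.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

const currentVersion uint32 = 2

// record stands in for SCSIMount: the version is carried in the payload so
// the decoder can reject data written by an older or newer struct layout.
type record struct {
	Version  uint32
	HostPath string
	LUN      int32
}

func encode(r record) ([]byte, error) {
	var buf bytes.Buffer
	r.Version = currentVersion
	if err := gob.NewEncoder(&buf).Encode(r); err != nil {
		return nil, fmt.Errorf("failed to encode record: %w", err)
	}
	return buf.Bytes(), nil
}

func decode(data []byte) (record, error) {
	var r record
	if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(&r); err != nil {
		return record{}, fmt.Errorf("failed to decode record: %w", err)
	}
	if r.Version != currentVersion {
		return record{}, fmt.Errorf("serialized version %d doesn't match current version %d", r.Version, currentVersion)
	}
	return r, nil
}

func main() {
	b, _ := encode(record{HostPath: `C:\layers\scratch.vhdx`, LUN: 3})
	r, err := decode(b)
	fmt.Println(r.HostPath, r.LUN, err) // C:\layers\scratch.vhdx 3 <nil>
}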
- if err := encoder.Encode(sm.serialVersionID); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.HostPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.UVMPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.Controller); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.LUN); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.readOnly); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.attachmentType); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.extensibleVirtualDiskType); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - return buf.Bytes(), nil -} - -// GobDecode deserializes the SCSIMount struct into the struct on which this is called -// (i.e the sm pointer) -func (sm *SCSIMount) GobDecode(data []byte) error { - buf := bytes.NewBuffer(data) - decoder := gob.NewDecoder(buf) - errMsgFmt := "failed to decode SCSIMount: %s" - // fields should be decoded in the same order in which they were encoded. - if err := decoder.Decode(&sm.serialVersionID); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if sm.serialVersionID != scsiCurrentSerialVersionID { - return fmt.Errorf("serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID) - } - if err := decoder.Decode(&sm.HostPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.UVMPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.Controller); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.LUN); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.readOnly); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.attachmentType); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.extensibleVirtualDiskType); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - return nil -} - -// Clone function creates a clone of the SCSIMount `sm` and adds the cloned SCSIMount to -// the uvm `vm`. If `sm` is read only then it is simply added to the `vm`. But if it is a -// writable mount(e.g a scratch layer) then a copy of it is made and that copy is added -// to the `vm`. -func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { - var ( - dstVhdPath string = sm.HostPath - err error - dir string - conStr string = guestrequest.ScsiControllerGuids[sm.Controller] - lunStr string = fmt.Sprintf("%d", sm.LUN) - ) - - if !sm.readOnly { - // This is a writable SCSI mount. It must be either the - // 1. scratch VHD of the UVM or - // 2. scratch VHD of the container. - // A user provided writable SCSI mount is not allowed on the template UVM - // or container and so this SCSI mount has to be the scratch VHD of the - // UVM or container. The container inside this UVM will automatically be - // cloned here when we are cloning the uvm itself. We will receive a - // request for creation of this container later and that request will - // specify the storage path for this container. However, that storage - // location is not available now so we just use the storage path of the - // uvm instead. - // TODO(ambarve): Find a better way for handling this. 
Problem with this - // approach is that the scratch VHD of the container will not be - // automatically cleaned after container exits. It will stay there as long - // as the UVM keeps running. - - // For the scratch VHD of the VM (always attached at Controller:0, LUN:0) - // clone it in the scratch folder - dir = cd.scratchFolder - if sm.Controller != 0 || sm.LUN != 0 { - dir, err = ioutil.TempDir(cd.scratchFolder, fmt.Sprintf("clone-mount-%d-%d", sm.Controller, sm.LUN)) - if err != nil { - return fmt.Errorf("error while creating directory for scsi mounts of clone vm: %s", err) - } - } - - // copy the VHDX - dstVhdPath = filepath.Join(dir, filepath.Base(sm.HostPath)) - log.G(ctx).WithFields(logrus.Fields{ - "source hostPath": sm.HostPath, - "controller": sm.Controller, - "LUN": sm.LUN, - "destination hostPath": dstVhdPath, - }).Debug("Creating a clone of SCSI mount") - - if err = copyfile.CopyFile(ctx, sm.HostPath, dstVhdPath, true); err != nil { - return err - } - - if err = grantAccess(ctx, cd.uvmID, dstVhdPath, VMAccessTypeIndividual); err != nil { - os.Remove(dstVhdPath) - return err - } - } - - if cd.doc.VirtualMachine.Devices.Scsi == nil { - cd.doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - } - - if _, ok := cd.doc.VirtualMachine.Devices.Scsi[conStr]; !ok { - cd.doc.VirtualMachine.Devices.Scsi[conStr] = hcsschema.Scsi{ - Attachments: map[string]hcsschema.Attachment{}, - } - } - - cd.doc.VirtualMachine.Devices.Scsi[conStr].Attachments[lunStr] = hcsschema.Attachment{ - Path: dstVhdPath, - Type_: sm.attachmentType, - } - - clonedScsiMount := newSCSIMount( - vm, - dstVhdPath, - sm.UVMPath, - sm.attachmentType, - sm.extensibleVirtualDiskType, - 1, - sm.Controller, - sm.LUN, - sm.readOnly, - sm.encrypted, - ) - - vm.scsiLocations[sm.Controller][sm.LUN] = clonedScsiMount - - return nil -} - -func (sm *SCSIMount) GetSerialVersionID() uint32 { - return scsiCurrentSerialVersionID -} - -// ParseExtensibleVirtualDiskPath parses the evd path provided in the config. -// extensible virtual disk path has format "evd:///" -// this function parses that and returns the `evdType` and `evd-mount-path`. -func ParseExtensibleVirtualDiskPath(hostPath string) (evdType, mountPath string, err error) { - trimmedPath := strings.TrimPrefix(hostPath, "evd://") - separatorIndex := strings.Index(trimmedPath, "/") - if separatorIndex <= 0 { - return "", "", errors.Errorf("invalid extensible vhd path: %s", hostPath) - } - return trimmedPath[:separatorIndex], trimmedPath[separatorIndex+1:], nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go deleted file mode 100644 index 6570928ea7..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/pkg/securitypolicy" -) - -// SetSecurityPolicy tells the gcs instance in the UVM what policy to apply. 
-// -// This has to happen before we start mounting things or generally changing -// the state of the UVM after is has been measured at startup -func (uvm *UtilityVM) SetSecurityPolicy(ctx context.Context, policy string) error { - if uvm.operatingSystem != "linux" { - return errNotSupported - } - - if policy == "" { - openDoorPolicy := securitypolicy.NewOpenDoorPolicy() - policyString, err := openDoorPolicy.EncodeToString() - if err != nil { - return err - } - policy = policyString - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - Settings: securitypolicy.EncodedSecurityPolicy{ - SecurityPolicy: policy, - }, - } - - modification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeSecurityPolicy, - RequestType: guestrequest.RequestTypeAdd, - Settings: securitypolicy.EncodedSecurityPolicy{ - SecurityPolicy: policy, - }, - } - - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("uvm::Policy: failed to modify utility VM configuration: %s", err) - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/share.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/share.go deleted file mode 100644 index 41ecda3a3a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/share.go +++ /dev/null @@ -1,74 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "os" - "path/filepath" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -// Share shares in file(s) from `reqHostPath` on the host machine to `reqUVMPath` inside the UVM. -// This function handles both LCOW and WCOW scenarios. 
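Share, defined below, picks VSMB for Windows guests and Plan9 for Linux guests, and when asked to share a single file it shares the file's parent directory with an allow-list restricted to that one name. The following self-contained sketch shows only that path-handling step; the shareSpec type and buildShareSpec function are invented for illustration.

package main

import (
	"fmt"
	"path/filepath"
)

// shareSpec captures the idea used for single-file shares: expose the parent
// directory, but restrict access to just the requested file name.
type shareSpec struct {
	hostDir      string
	restrict     bool
	allowedNames []string
}

func buildShareSpec(reqHostPath string, isDir bool) shareSpec {
	if isDir {
		return shareSpec{hostDir: reqHostPath}
	}
	dir, file := filepath.Split(reqHostPath)
	return shareSpec{hostDir: dir, restrict: true, allowedNames: []string{file}}
}

func main() {
	fmt.Printf("%+v\n", buildShareSpec("/data/config.json", false))
	// {hostDir:/data/ restrict:true allowedNames:[config.json]}
	fmt.Printf("%+v\n", buildShareSpec("/data", true))
	// {hostDir:/data restrict:false allowedNames:[]}
}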
-func (uvm *UtilityVM) Share(ctx context.Context, reqHostPath, reqUVMPath string, readOnly bool) (err error) { - if uvm.OS() == "windows" { - options := uvm.DefaultVSMBOptions(readOnly) - vsmbShare, err := uvm.AddVSMB(ctx, reqHostPath, options) - if err != nil { - return err - } - defer func() { - if err != nil { - _ = vsmbShare.Release(ctx) - } - }() - - sharePath, err := uvm.GetVSMBUvmPath(ctx, reqHostPath, readOnly) - if err != nil { - return err - } - guestReq := guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedDirectory, - RequestType: guestrequest.RequestTypeAdd, - Settings: &hcsschema.MappedDirectory{ - HostPath: sharePath, - ContainerPath: reqUVMPath, - ReadOnly: readOnly, - }, - } - if err := uvm.GuestRequest(ctx, guestReq); err != nil { - return err - } - } else { - st, err := os.Stat(reqHostPath) - if err != nil { - return fmt.Errorf("could not open '%s' path on host: %s", reqHostPath, err) - } - var ( - hostPath string = reqHostPath - restrictAccess bool - fileName string - allowedNames []string - ) - if !st.IsDir() { - hostPath, fileName = filepath.Split(hostPath) - allowedNames = append(allowedNames, fileName) - restrictAccess = true - } - plan9Share, err := uvm.AddPlan9(ctx, hostPath, reqUVMPath, readOnly, restrictAccess, allowedNames) - if err != nil { - return err - } - defer func() { - if err != nil { - _ = plan9Share.Release(ctx) - } - }() - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go deleted file mode 100644 index f59259543a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go +++ /dev/null @@ -1,317 +0,0 @@ -//go:build windows - -package uvm - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" - - "github.com/Microsoft/hcsshim/internal/gcs" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -// entropyBytes is the number of bytes of random data to send to a Linux UVM -// during boot to seed the CRNG. There is not much point in making this too -// large since the random data collected from the host is likely computed from a -// relatively small key (256 bits?), so additional bytes would not actually -// increase the entropy of the guest's pool. However, send enough to convince -// containers that there is a large amount of entropy since this idea is -// generally misunderstood. -const entropyBytes = 512 - -type gcsLogEntryStandard struct { - Time time.Time `json:"time"` - Level logrus.Level `json:"level"` - Message string `json:"msg"` -} - -type gcsLogEntry struct { - gcsLogEntryStandard - Fields map[string]interface{} -} - -// FUTURE-jstarks: Change the GCS log format to include type information -// (e.g. by using a different encoding such as protobuf). -func (e *gcsLogEntry) UnmarshalJSON(b []byte) error { - // Default the log level to info. 
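gcsLogEntry's UnmarshalJSON, whose body continues below, decodes every log line twice: once into the typed struct for the known fields and once into a map to capture whatever else the guest sent. A minimal standalone version of that two-pass decode is sketched here; the entry/entryStd names and the sample payload are invented.

package main

import (
	"encoding/json"
	"fmt"
)

type entryStd struct {
	Level   string `json:"level"`
	Message string `json:"msg"`
}

// entry mirrors the two-pass decode used for GCS log lines: known fields land
// in the embedded struct, everything else is kept in a generic map.
type entry struct {
	entryStd
	Fields map[string]interface{}
}

func (e *entry) UnmarshalJSON(b []byte) error {
	if err := json.Unmarshal(b, &e.entryStd); err != nil {
		return err
	}
	if err := json.Unmarshal(b, &e.Fields); err != nil {
		return err
	}
	// Drop the keys already represented by typed fields.
	delete(e.Fields, "level")
	delete(e.Fields, "msg")
	return nil
}

func main() {
	var e entry
	_ = json.Unmarshal([]byte(`{"level":"info","msg":"started","pid":42}`), &e)
	fmt.Println(e.Message, e.Fields) // started map[pid:42]
}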
- e.Level = logrus.InfoLevel - if err := json.Unmarshal(b, &e.gcsLogEntryStandard); err != nil { - return err - } - if err := json.Unmarshal(b, &e.Fields); err != nil { - return err - } - // Do not allow fatal or panic level errors to propagate. - if e.Level < logrus.ErrorLevel { - e.Level = logrus.ErrorLevel - } - // Clear special fields. - delete(e.Fields, "time") - delete(e.Fields, "level") - delete(e.Fields, "msg") - // Normalize floats to integers. - for k, v := range e.Fields { - if d, ok := v.(float64); ok && float64(int64(d)) == d { - e.Fields[k] = int64(d) - } - } - return nil -} - -func isDisconnectError(err error) bool { - if o, ok := err.(*net.OpError); ok { - if s, ok := o.Err.(*os.SyscallError); ok { - return s.Err == syscall.WSAECONNABORTED || s.Err == syscall.WSAECONNRESET - } - } - return false -} - -func parseLogrus(vmid string) func(r io.Reader) { - return func(r io.Reader) { - j := json.NewDecoder(r) - e := log.L.Dup() - fields := e.Data - for { - for k := range fields { - delete(fields, k) - } - gcsEntry := gcsLogEntry{Fields: e.Data} - err := j.Decode(&gcsEntry) - if err != nil { - // Something went wrong. Read the rest of the data as a single - // string and log it at once -- it's probably a GCS panic stack. - if err != io.EOF && !isDisconnectError(err) { - logrus.WithFields(logrus.Fields{ - logfields.UVMID: vmid, - logrus.ErrorKey: err, - }).Error("gcs log read") - } - rest, _ := ioutil.ReadAll(io.MultiReader(j.Buffered(), r)) - rest = bytes.TrimSpace(rest) - if len(rest) != 0 { - logrus.WithFields(logrus.Fields{ - logfields.UVMID: vmid, - "stderr": string(rest), - }).Error("gcs terminated") - } - break - } - fields[logfields.UVMID] = vmid - fields["vm.time"] = gcsEntry.Time - e.Log(gcsEntry.Level, gcsEntry.Message) - } - } -} - -// When using an external GCS connection it is necessary to send a ModifySettings request -// for HvSockt so that the GCS can setup some registry keys that are required for running -// containers inside the UVM. In non external GCS connection scenarios this is done by the -// HCS immediately after the GCS connection is done. Since, we are using the external GCS -// connection we should do that setup here after we connect with the GCS. -// This only applies for WCOW -func (uvm *UtilityVM) configureHvSocketForGCS(ctx context.Context) (err error) { - if uvm.OS() != "windows" { - return nil - } - - hvsocketAddress := &hcsschema.HvSocketAddress{ - LocalAddress: uvm.runtimeID.String(), - ParentAddress: gcs.WindowsGcsHvHostID.String(), - } - - conSetupReq := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.ModificationRequest{ - RequestType: guestrequest.RequestTypeUpdate, - ResourceType: guestresource.ResourceTypeHvSocket, - Settings: hvsocketAddress, - }, - } - - if err = uvm.modify(ctx, conSetupReq); err != nil { - return fmt.Errorf("failed to configure HVSOCK for external GCS: %s", err) - } - - return nil -} - -// Start synchronously starts the utility VM. -func (uvm *UtilityVM) Start(ctx context.Context) (err error) { - ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) - g, gctx := errgroup.WithContext(ctx) - defer func() { - _ = g.Wait() - }() - defer cancel() - - // Prepare to provide entropy to the init process in the background. This - // must be done in a goroutine since, when using the internal bridge, the - // call to Start() will block until the GCS launches, and this cannot occur - // until the host accepts and closes the entropy connection. 
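The comment above explains that the host pushes a fixed number of random bytes over the entropy socket to seed the guest's CRNG. The pattern is simply io.CopyN from crypto/rand into the accepted connection; the standalone sketch below substitutes a bytes.Buffer for the hvsock connection so it runs anywhere.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"
)

// entropyBytes mirrors the constant above: 512 bytes is plenty to seed a CRNG.
const entropyBytes = 512

// seed writes exactly n bytes of host randomness to w, the same io.CopyN
// pattern used once the UVM's entropy socket is accepted.
func seed(w io.Writer, n int64) error {
	written, err := io.CopyN(w, rand.Reader, n)
	if err != nil {
		return fmt.Errorf("failed to write entropy: %w", err)
	}
	fmt.Printf("wrote %d random bytes\n", written)
	return nil
}

func main() {
	var buf bytes.Buffer // stands in for the accepted hvsock connection
	if err := seed(&buf, entropyBytes); err != nil {
		panic(err)
	}
}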
- if uvm.entropyListener != nil { - g.Go(func() error { - conn, err := uvm.acceptAndClose(gctx, uvm.entropyListener) - uvm.entropyListener = nil - if err != nil { - return fmt.Errorf("failed to connect to entropy socket: %s", err) - } - defer conn.Close() - _, err = io.CopyN(conn, rand.Reader, entropyBytes) - if err != nil { - return fmt.Errorf("failed to write entropy: %s", err) - } - return nil - }) - } - - if uvm.outputListener != nil { - g.Go(func() error { - conn, err := uvm.acceptAndClose(gctx, uvm.outputListener) - uvm.outputListener = nil - if err != nil { - close(uvm.outputProcessingDone) - return fmt.Errorf("failed to connect to log socket: %s", err) - } - go func() { - uvm.outputHandler(conn) - close(uvm.outputProcessingDone) - }() - return nil - }) - } - - err = uvm.hcsSystem.Start(ctx) - if err != nil { - return err - } - defer func() { - if err != nil { - _ = uvm.hcsSystem.Terminate(ctx) - _ = uvm.hcsSystem.Wait() - } - }() - - // Start waiting on the utility VM. - uvm.exitCh = make(chan struct{}) - go func() { - err := uvm.hcsSystem.Wait() - if err == nil { - err = uvm.hcsSystem.ExitError() - } - uvm.exitErr = err - close(uvm.exitCh) - }() - - // Collect any errors from writing entropy or establishing the log - // connection. - if err = g.Wait(); err != nil { - return err - } - - if uvm.gcListener != nil { - // Accept the GCS connection. - conn, err := uvm.acceptAndClose(ctx, uvm.gcListener) - uvm.gcListener = nil - if err != nil { - return fmt.Errorf("failed to connect to GCS: %s", err) - } - - var initGuestState *gcs.InitialGuestState - if uvm.OS() == "windows" { - // Default to setting the time zone in the UVM to the hosts time zone unless the client asked to avoid this behavior. If so, assign - // to UTC. - if uvm.noInheritHostTimezone { - initGuestState = &gcs.InitialGuestState{ - Timezone: utcTimezone, - } - } else { - tz, err := getTimezone() - if err != nil { - return err - } - initGuestState = &gcs.InitialGuestState{ - Timezone: tz, - } - } - } - // Start the GCS protocol. - gcc := &gcs.GuestConnectionConfig{ - Conn: conn, - Log: log.G(ctx).WithField(logfields.UVMID, uvm.id), - IoListen: gcs.HvsockIoListen(uvm.runtimeID), - InitGuestState: initGuestState, - } - uvm.gc, err = gcc.Connect(ctx, !uvm.IsClone) - if err != nil { - return err - } - uvm.guestCaps = *uvm.gc.Capabilities() - uvm.protocol = uvm.gc.Protocol() - - // initial setup required for external GCS connection - if err = uvm.configureHvSocketForGCS(ctx); err != nil { - return fmt.Errorf("failed to do initial GCS setup: %s", err) - } - } else { - // Cache the guest connection properties. - properties, err := uvm.hcsSystem.Properties(ctx, schema1.PropertyTypeGuestConnection) - if err != nil { - return err - } - uvm.guestCaps = properties.GuestConnectionInfo.GuestDefinedCapabilities - uvm.protocol = properties.GuestConnectionInfo.ProtocolVersion - } - - return nil -} - -// acceptAndClose accepts a connection and then closes a listener. If the -// context becomes done or the utility VM terminates, the operation will be -// cancelled (but the listener will still be closed). 
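The acceptAndClose comment above describes accepting on a listener while remaining cancellable: Accept runs in a goroutine, and closing the listener is what unblocks it when the context is done. The sketch below shows the same shape against a plain TCP listener; the function name and the error-preference comment are illustrative, not hcsshim's exported API.

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// acceptWithContext shows the shape of the acceptAndClose helper: Accept runs
// in a goroutine, and the listener is always closed, either after a successful
// accept or when the context is cancelled (which unblocks Accept with an error).
func acceptWithContext(ctx context.Context, l net.Listener) (net.Conn, error) {
	type result struct {
		conn net.Conn
		err  error
	}
	ch := make(chan result, 1)
	go func() {
		conn, err := l.Accept()
		ch <- result{conn, err}
	}()
	select {
	case r := <-ch:
		l.Close()
		return r.conn, r.err
	case <-ctx.Done():
		l.Close()   // unblocks the pending Accept
		r := <-ch   // drain the goroutine's result
		if r.conn != nil {
			r.conn.Close()
		}
		return nil, ctx.Err() // prefer the context error, as the comment above suggests
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	_, err = acceptWithContext(ctx, l) // no client connects, so this times out
	fmt.Println(err)                   // context deadline exceeded
}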
-func (uvm *UtilityVM) acceptAndClose(ctx context.Context, l net.Listener) (net.Conn, error) { - var conn net.Conn - ch := make(chan error) - go func() { - var err error - conn, err = l.Accept() - ch <- err - }() - select { - case err := <-ch: - l.Close() - return conn, err - case <-ctx.Done(): - case <-uvm.exitCh: - } - l.Close() - err := <-ch - if err == nil { - return conn, err - } - // Prefer context error to VM error to accept error in order to return the - // most useful error. - if ctx.Err() != nil { - return nil, ctx.Err() - } - if uvm.exitErr != nil { - return nil, uvm.exitErr - } - return nil, err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/stats.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/stats.go deleted file mode 100644 index 2cd5c24ce0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/stats.go +++ /dev/null @@ -1,158 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "strings" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/go-winio/pkg/process" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" -) - -// checkProcess checks if the process identified by the given pid has a name -// matching `desiredProcessName`, and is running as a user with domain -// `desiredDomain` and user name `desiredUser`. If the process matches, it -// returns a handle to the process. If the process does not match, it returns -// 0. -func checkProcess(ctx context.Context, pid uint32, desiredProcessName string, desiredDomain string, desiredUser string) (p windows.Handle, err error) { - desiredProcessName = strings.ToUpper(desiredProcessName) - desiredDomain = strings.ToUpper(desiredDomain) - desiredUser = strings.ToUpper(desiredUser) - - p, err = windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION|windows.PROCESS_VM_READ, false, pid) - if err != nil { - return 0, err - } - defer func(openedProcess windows.Handle) { - // If we don't return this process handle, close it so it doesn't leak. - if p == 0 { - windows.Close(openedProcess) - } - }(p) - // Querying vmmem's image name as a win32 path returns ERROR_GEN_FAILURE - // for some reason, so we query it as an NT path instead. - name, err := process.QueryFullProcessImageName(p, process.ImageNameFormatNTPath) - if err != nil { - return 0, err - } - if strings.ToUpper(name) == desiredProcessName { - var t windows.Token - if err := windows.OpenProcessToken(p, windows.TOKEN_QUERY, &t); err != nil { - return 0, err - } - defer t.Close() - tUser, err := t.GetTokenUser() - if err != nil { - return 0, err - } - user, domain, _, err := tUser.User.Sid.LookupAccount("") - if err != nil { - return 0, err - } - log.G(ctx).WithFields(logrus.Fields{ - "name": name, - "domain": domain, - "user": user, - }).Debug("checking vmmem process identity") - if strings.ToUpper(domain) == desiredDomain && strings.ToUpper(user) == desiredUser { - return p, nil - } - } - return 0, nil -} - -// lookupVMMEM locates the vmmem process for a VM given the VM ID. It returns -// a handle to the vmmem process. The lookup is implemented by enumerating all -// processes on the system, and finding a process with full name "vmmem", -// running as "NT VIRTUAL MACHINE\". 
-func lookupVMMEM(ctx context.Context, vmID guid.GUID) (proc windows.Handle, err error) { - vmIDStr := strings.ToUpper(vmID.String()) - log.G(ctx).WithField("vmID", vmIDStr).Debug("looking up vmmem") - - pids, err := process.EnumProcesses() - if err != nil { - return 0, errors.Wrap(err, "failed to enumerate processes") - } - for _, pid := range pids { - p, err := checkProcess(ctx, pid, "vmmem", "NT VIRTUAL MACHINE", vmIDStr) - if err != nil { - // Checking the process could fail for a variety of reasons, such as - // the process exiting since we called EnumProcesses, or not having - // access to open the process (even as SYSTEM). In the case of an - // error, we just log and continue looking at the other processes. - log.G(ctx).WithField("pid", pid).Debug("failed to check process") - continue - } - if p != 0 { - log.G(ctx).WithField("pid", pid).Debug("found vmmem match") - return p, nil - } - } - return 0, errors.New("failed to find matching vmmem process") -} - -// getVMMEMProcess returns a handle to the vmmem process associated with this -// UVM. It only does the actual process lookup once, after which it caches the -// process handle in the UVM object. -func (uvm *UtilityVM) getVMMEMProcess(ctx context.Context) (windows.Handle, error) { - uvm.vmmemOnce.Do(func() { - uvm.vmmemProcess, uvm.vmmemErr = lookupVMMEM(ctx, uvm.runtimeID) - }) - return uvm.vmmemProcess, uvm.vmmemErr -} - -// Stats returns various UVM statistics. -func (uvm *UtilityVM) Stats(ctx context.Context) (*stats.VirtualMachineStatistics, error) { - s := &stats.VirtualMachineStatistics{} - props, err := uvm.hcsSystem.PropertiesV2(ctx, hcsschema.PTStatistics, hcsschema.PTMemory) - if err != nil { - return nil, err - } - s.Processor = &stats.VirtualMachineProcessorStatistics{} - s.Processor.TotalRuntimeNS = uint64(props.Statistics.Processor.TotalRuntime100ns * 100) - - s.Memory = &stats.VirtualMachineMemoryStatistics{} - if uvm.physicallyBacked { - // If the uvm is physically backed we set the working set to the total amount allocated - // to the UVM. AssignedMemory returns the number of 4KB pages. Will always be 4KB - // regardless of what the UVMs actual page size is so we don't need that information. - if props.Memory != nil { - s.Memory.WorkingSetBytes = props.Memory.VirtualMachineMemory.AssignedMemory * 4096 - } - } else { - // The HCS properties does not return sufficient information to calculate - // working set size for a VA-backed UVM. To work around this, we instead - // locate the vmmem process for the VM, and query that process's working set - // instead, which will be the working set for the VM. 
- vmmemProc, err := uvm.getVMMEMProcess(ctx) - if err != nil { - return nil, err - } - memCounters, err := process.GetProcessMemoryInfo(vmmemProc) - if err != nil { - return nil, err - } - s.Memory.WorkingSetBytes = uint64(memCounters.WorkingSetSize) - } - - if props.Memory != nil { - s.Memory.VirtualNodeCount = props.Memory.VirtualNodeCount - s.Memory.VmMemory = &stats.VirtualMachineMemory{} - s.Memory.VmMemory.AvailableMemory = props.Memory.VirtualMachineMemory.AvailableMemory - s.Memory.VmMemory.AvailableMemoryBuffer = props.Memory.VirtualMachineMemory.AvailableMemoryBuffer - s.Memory.VmMemory.ReservedMemory = props.Memory.VirtualMachineMemory.ReservedMemory - s.Memory.VmMemory.AssignedMemory = props.Memory.VirtualMachineMemory.AssignedMemory - s.Memory.VmMemory.SlpActive = props.Memory.VirtualMachineMemory.SlpActive - s.Memory.VmMemory.BalancingEnabled = props.Memory.VirtualMachineMemory.BalancingEnabled - s.Memory.VmMemory.DmOperationInProgress = props.Memory.VirtualMachineMemory.DmOperationInProgress - } - return s, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/timezone.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/timezone.go deleted file mode 100644 index 3ce7b9764f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/timezone.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build windows - -package uvm - -import ( - "fmt" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "golang.org/x/sys/windows" -) - -// UTC has everything set to 0's. Just need to fill in the pointer fields and string identifiers. -var utcTimezone = &hcsschema.TimeZoneInformation{ - StandardName: "Coordinated Universal Time", - DaylightName: "Coordinated Universal Time", - StandardDate: &hcsschema.SystemTime{}, - DaylightDate: &hcsschema.SystemTime{}, -} - -// getTimezone returns the hosts timezone in an HCS TimeZoneInformation structure and an error if there -// is one. -func getTimezone() (*hcsschema.TimeZoneInformation, error) { - var tz windows.Timezoneinformation - _, err := windows.GetTimeZoneInformation(&tz) - if err != nil { - return nil, fmt.Errorf("failed to get time zone information: %w", err) - } - return tziToHCSSchema(&tz), nil -} - -// TZIToHCSSchema converts a windows.TimeZoneInformation (TIME_ZONE_INFORMATION) to the hcs schema equivalent. 
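getTimezone above queries the host time zone with windows.GetTimeZoneInformation before converting it to the HCS schema. The Windows-only snippet below shows that query in isolation and prints the standard name and bias (minutes to add to local time to reach UTC, e.g. 480 for Pacific Standard Time); it is a small standalone check, not part of the package.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// Query the host time zone the same way getTimezone does before it is
// converted into an hcsschema.TimeZoneInformation.
func main() {
	var tz windows.Timezoneinformation
	if _, err := windows.GetTimeZoneInformation(&tz); err != nil {
		panic(fmt.Errorf("failed to get time zone information: %w", err))
	}
	fmt.Printf("standard name: %s, bias: %d minutes\n",
		windows.UTF16ToString(tz.StandardName[:]), tz.Bias)
}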
-func tziToHCSSchema(tzi *windows.Timezoneinformation) *hcsschema.TimeZoneInformation { - return &hcsschema.TimeZoneInformation{ - Bias: tzi.Bias, - StandardName: windows.UTF16ToString(tzi.StandardName[:]), - StandardDate: &hcsschema.SystemTime{ - Year: int32(tzi.StandardDate.Year), - Month: int32(tzi.StandardDate.Month), - DayOfWeek: int32(tzi.StandardDate.DayOfWeek), - Day: int32(tzi.StandardDate.Day), - Hour: int32(tzi.StandardDate.Hour), - Second: int32(tzi.StandardDate.Second), - Minute: int32(tzi.StandardDate.Minute), - Milliseconds: int32(tzi.StandardDate.Milliseconds), - }, - StandardBias: tzi.StandardBias, - DaylightName: windows.UTF16ToString(tzi.DaylightName[:]), - DaylightDate: &hcsschema.SystemTime{ - Year: int32(tzi.DaylightDate.Year), - Month: int32(tzi.DaylightDate.Month), - DayOfWeek: int32(tzi.DaylightDate.DayOfWeek), - Day: int32(tzi.DaylightDate.Day), - Hour: int32(tzi.DaylightDate.Hour), - Second: int32(tzi.DaylightDate.Second), - Minute: int32(tzi.DaylightDate.Minute), - Milliseconds: int32(tzi.DaylightDate.Milliseconds), - }, - DaylightBias: tzi.DaylightBias, - } -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go deleted file mode 100644 index 020eb7099b..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go +++ /dev/null @@ -1,149 +0,0 @@ -//go:build windows - -package uvm - -import ( - "net" - "sync" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/internal/gcs" - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - "github.com/Microsoft/hcsshim/internal/hns" -) - -// | WCOW | LCOW -// Container scratch | SCSI | SCSI -// Scratch space | ---- | SCSI // For file system utilities. /tmp/scratch -// Read-Only Layer | VSMB | VPMEM -// Mapped Directory | VSMB | PLAN9 - -type nicInfo struct { - ID string - Endpoint *hns.HNSEndpoint -} - -type namespaceInfo struct { - nics map[string]*nicInfo -} - -// UtilityVM is the object used by clients representing a utility VM -type UtilityVM struct { - id string // Identifier for the utility VM (user supplied or generated) - runtimeID guid.GUID // Hyper-V VM ID - owner string // Owner for the utility VM (user supplied or generated) - operatingSystem string // "windows" or "linux" - hcsSystem *hcs.System // The handle to the compute system - gcListener net.Listener // The GCS connection listener - gc *gcs.GuestConnection // The GCS connection - processorCount int32 - physicallyBacked bool // If the uvm is backed by physical memory and not virtual memory - m sync.Mutex // Lock for adding/removing devices - - exitErr error - exitCh chan struct{} - - // devicesPhysicallyBacked indicates if additional devices added to a uvm should be - // entirely physically backed - devicesPhysicallyBacked bool - - // GCS bridge protocol and capabilities - protocol uint32 - guestCaps schema1.GuestDefinedCapabilities - - // containerCounter is the current number of containers that have been - // created. This is never decremented in the life of the UVM. - // - // NOTE: All accesses to this MUST be done atomically. - containerCounter uint64 - - // noWritableFileShares disables mounting any writable vSMB or Plan9 shares - // on the uVM. This prevents containers in the uVM modifying files and directories - // made available via the "mounts" options in the container spec, or shared - // to the uVM directly. 
- // This option does not prevent writable SCSI mounts. - noWritableFileShares bool - - // VSMB shares that are mapped into a Windows UVM. These are used for read-only - // layers and mapped directories. - // We maintain two sets of maps, `vsmbDirShares` tracks shares that are - // unrestricted mappings of directories. `vsmbFileShares` tracks shares that - // are restricted to some subset of files in the directory. This is used as - // part of a temporary fix to allow WCOW single-file mapping to function. - vsmbDirShares map[string]*VSMBShare - vsmbFileShares map[string]*VSMBShare - vsmbCounter uint64 // Counter to generate a unique share name for each VSMB share. - vsmbNoDirectMap bool // indicates if VSMB devices should be added with the `NoDirectMap` option - - // VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for - // booting from VHD. - vpmemMaxCount uint32 // The max number of VPMem devices. - vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device. - vpmemMultiMapping bool // Enable mapping multiple VHDs onto a single VPMem device - vpmemDevicesDefault [MaxVPMEMCount]*vPMemInfoDefault - vpmemDevicesMultiMapped [MaxVPMEMCount]*vPMemInfoMulti - - // SCSI devices that are mapped into a Windows or Linux utility VM - scsiLocations [4][64]*SCSIMount // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though. - scsiControllerCount uint32 // Number of SCSI controllers in the utility VM - encryptScratch bool // Enable scratch encryption - - vpciDevices map[VPCIDeviceKey]*VPCIDevice // map of device instance id to vpci device - - // Plan9 are directories mapped into a Linux utility VM - plan9Counter uint64 // Each newly-added plan9 share has a counter used as its ID in the ResourceURI and for the name - - namespaces map[string]*namespaceInfo - - outputListener net.Listener - outputProcessingDone chan struct{} - outputHandler OutputHandler - - entropyListener net.Listener - - // Handle to the vmmem process associated with this UVM. Used to look up - // memory metrics for the UVM. - vmmemProcess windows.Handle - // Tracks the error returned when looking up the vmmem process. - vmmemErr error - // We only need to look up the vmmem process once, then we keep a handle - // open. - vmmemOnce sync.Once - - // mountCounter is the number of mounts that have been added to the UVM - // This is used in generating a unique mount path inside the UVM for every mount. - // Access to this variable should be done atomically. - mountCounter uint64 - - // specifies if this UVM is created to be saved as a template - IsTemplate bool - - // specifies if this UVM is a cloned from a template - IsClone bool - - // ID of the template from which this clone was created. Only applies when IsClone - // is true - TemplateID string - - // Location that container process dumps will get written too. - processDumpLocation string - - // The CreateOpts used to create this uvm. These can be either of type - // uvm.OptionsLCOW or uvm.OptionsWCOW - createOpts interface{} - - // Network config proxy client. If nil then this wasn't requested and the - // uvms network will be configured locally. - ncProxyClientAddress string - - // networkSetup handles the logic for setting up and tearing down any network configuration - // for the Utility VM. - networkSetup NetworkSetup - - // noInheritHostTimezone specifies whether to not inherit the hosts timezone for the UVM. UTC will be set as the default instead. 
- // This only applies for WCOW. - noInheritHostTimezone bool -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/update_uvm.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/update_uvm.go deleted file mode 100644 index b8fcddac6e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/update_uvm.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/pkg/annotations" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func (uvm *UtilityVM) UpdateConstraints(ctx context.Context, data interface{}, annots map[string]string) error { - var memoryLimitInBytes *uint64 - var processorLimits *hcsschema.ProcessorLimits - - switch resources := data.(type) { - case *specs.WindowsResources: - if resources.Memory != nil { - memoryLimitInBytes = resources.Memory.Limit - } - if resources.CPU != nil { - processorLimits := &hcsschema.ProcessorLimits{} - if resources.CPU.Maximum != nil { - processorLimits.Limit = uint64(*resources.CPU.Maximum) - } - if resources.CPU.Shares != nil { - processorLimits.Weight = uint64(*resources.CPU.Shares) - } - } - case *specs.LinuxResources: - if resources.Memory != nil { - mem := uint64(*resources.Memory.Limit) - memoryLimitInBytes = &mem - } - if resources.CPU != nil { - processorLimits := &hcsschema.ProcessorLimits{} - if resources.CPU.Quota != nil { - processorLimits.Limit = uint64(*resources.CPU.Quota) - } - if resources.CPU.Shares != nil { - processorLimits.Weight = uint64(*resources.CPU.Shares) - } - } - } - - if memoryLimitInBytes != nil { - if err := uvm.UpdateMemory(ctx, *memoryLimitInBytes); err != nil { - return err - } - } - if processorLimits != nil { - if err := uvm.UpdateCPULimits(ctx, processorLimits); err != nil { - return err - } - } - - // Check if an annotation was sent to update cpugroup membership - if cpuGroupID, ok := annots[annotations.CPUGroupID]; ok { - if err := uvm.SetCPUGroup(ctx, cpuGroupID); err != nil { - return err - } - } - - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/virtual_device.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/virtual_device.go deleted file mode 100644 index 3bd6e187a9..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/virtual_device.go +++ /dev/null @@ -1,175 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - "github.com/Microsoft/go-winio/pkg/guid" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -const ( - GPUDeviceIDType = "gpu" - VPCILocationPathIDType = "vpci-location-path" - VPCIClassGUIDTypeLegacy = "class" - VPCIClassGUIDType = "vpci-class-guid" - VPCIDeviceIDTypeLegacy = "vpci" - VPCIDeviceIDType = "vpci-instance-id" -) - -// this is the well known channel type GUID defined by VMBUS for all assigned devices -const vmbusChannelTypeGUIDFormatted = "{44c4f61d-4444-4400-9d52-802e27ede19f}" -const assignedDeviceEnumerator = "VMBUS" - -type VPCIDeviceKey struct { - deviceInstanceID string - virtualFunctionIndex uint16 -} - -// VPCIDevice represents a vpci device. Holds its guid and a handle to the uvm it -// belongs to. 
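VPCIDeviceKey above pairs a device instance ID with a virtual function index, and because the struct contains only comparable fields it can be used directly as a map key for the per-device reference counts that AssignDevice and RemoveDevice maintain. A minimal sketch of that bookkeeping follows; the deviceKey name and the instance ID string are placeholders.

package main

import "fmt"

// deviceKey mirrors the (instance ID, virtual function) pair used to index
// assigned devices: structs with only comparable fields work directly as map keys.
type deviceKey struct {
	instanceID string
	vfIndex    uint16
}

func main() {
	refs := map[deviceKey]uint32{}
	id := "PCI\\VEN_ABCD&DEV_1234" // made-up placeholder instance ID
	refs[deviceKey{id, 0}]++
	refs[deviceKey{id, 0}]++ // same device and function: shared, refcount grows
	refs[deviceKey{id, 1}]++ // different function: tracked as a separate entry
	fmt.Println(len(refs))            // 2
	fmt.Println(refs[deviceKey{id, 0}]) // 2
}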
-type VPCIDevice struct { - // vm is the handle to the UVM that this device belongs to - vm *UtilityVM - // VMBusGUID is the instance ID for this device when it is exposed via VMBus - VMBusGUID string - // deviceInstanceID is the instance ID of the device on the host - deviceInstanceID string - // virtualFunctionIndex is the function index for the pci device to assign - virtualFunctionIndex uint16 - // refCount stores the number of references to this device in the UVM - refCount uint32 -} - -// GetAssignedDeviceVMBUSInstanceID returns the instance ID of the VMBUS channel device node created. -// -// When a device is assigned to a UVM via VPCI support in HCS, a new VMBUS channel device node is -// created in the UVM. The actual device that was assigned in is exposed as a child on this VMBUS -// channel device node. -// -// A device node's instance ID is an identifier that distinguishes that device from other devices -// on the system. The GUID of a VMBUS channel device node refers to that channel's unique -// identifier used internally by VMBUS and can be used to determine the VMBUS channel -// device node's instance ID. -// -// A VMBUS channel device node's instance ID is in the form: -// "VMBUS\vmbusChannelTypeGUIDFormatted\{vmBusChannelGUID}" -func (uvm *UtilityVM) GetAssignedDeviceVMBUSInstanceID(vmBusChannelGUID string) string { - return fmt.Sprintf("%s\\%s\\{%s}", assignedDeviceEnumerator, vmbusChannelTypeGUIDFormatted, vmBusChannelGUID) -} - -// Release frees the resources of the corresponding vpci device -func (vpci *VPCIDevice) Release(ctx context.Context) error { - if err := vpci.vm.RemoveDevice(ctx, vpci.deviceInstanceID, vpci.virtualFunctionIndex); err != nil { - return fmt.Errorf("failed to remove VPCI device: %s", err) - } - return nil -} - -// AssignDevice assigns a vpci device to the uvm -// if the device already exists, the stored VPCIDevice's ref count is increased -// and the VPCIDevice is returned. -// Otherwise, a new request is made to assign the target device indicated by the deviceID -// onto the UVM. A new VPCIDevice entry is made on the UVM and the VPCIDevice is returned -// to the caller. -// Allow callers to specify the vmbus guid they want the device to show up with. 
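GetAssignedDeviceVMBUSInstanceID above documents the shape of the VMBUS channel device node's instance ID: the VMBUS enumerator, the well-known channel type GUID, then the channel GUID in braces. The snippet below reproduces that formatting with the constants from this file; the channel GUID passed in is a made-up placeholder.

package main

import "fmt"

const (
	assignedDeviceEnumerator      = "VMBUS"
	vmbusChannelTypeGUIDFormatted = "{44c4f61d-4444-4400-9d52-802e27ede19f}"
)

// vmbusInstanceID builds the documented instance ID shape:
// VMBUS\<channel type GUID>\{<channel GUID>}.
func vmbusInstanceID(vmBusChannelGUID string) string {
	return fmt.Sprintf("%s\\%s\\{%s}", assignedDeviceEnumerator, vmbusChannelTypeGUIDFormatted, vmBusChannelGUID)
}

func main() {
	// The channel GUID below is a made-up placeholder.
	fmt.Println(vmbusInstanceID("12345678-9abc-def0-1234-56789abcdef0"))
	// VMBUS\{44c4f61d-4444-4400-9d52-802e27ede19f}\{12345678-9abc-def0-1234-56789abcdef0}
}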
-func (uvm *UtilityVM) AssignDevice(ctx context.Context, deviceID string, index uint16, vmBusGUID string) (*VPCIDevice, error) { - if vmBusGUID == "" { - guid, err := guid.NewV4() - if err != nil { - return nil, err - } - vmBusGUID = guid.String() - } - - key := VPCIDeviceKey{ - deviceInstanceID: deviceID, - virtualFunctionIndex: index, - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - existingVPCIDevice := uvm.vpciDevices[key] - if existingVPCIDevice != nil { - existingVPCIDevice.refCount++ - return existingVPCIDevice, nil - } - - targetDevice := hcsschema.VirtualPciDevice{ - Functions: []hcsschema.VirtualPciFunction{ - { - DeviceInstancePath: deviceID, - VirtualFunction: index, - }, - }, - } - - request := &hcsschema.ModifySettingRequest{ - ResourcePath: fmt.Sprintf(resourcepaths.VirtualPCIResourceFormat, vmBusGUID), - RequestType: guestrequest.RequestTypeAdd, - Settings: targetDevice, - } - - // WCOW (when supported) does not require a guest request as part of the - // device assignment - if uvm.operatingSystem != "windows" { - // for LCOW, we need to make sure that specific paths relating to the - // device exist so they are ready to be used by later - // work in openGCS - request.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeVPCIDevice, - RequestType: guestrequest.RequestTypeAdd, - Settings: guestresource.LCOWMappedVPCIDevice{ - VMBusGUID: vmBusGUID, - }, - } - } - - if err := uvm.modify(ctx, request); err != nil { - return nil, err - } - result := &VPCIDevice{ - vm: uvm, - VMBusGUID: vmBusGUID, - deviceInstanceID: deviceID, - virtualFunctionIndex: index, - refCount: 1, - } - uvm.vpciDevices[key] = result - return result, nil -} - -// RemoveDevice removes a vpci device from a uvm when there are -// no more references to a given VPCIDevice. Otherwise, decrements -// the reference count of the stored VPCIDevice and returns nil. 
-func (uvm *UtilityVM) RemoveDevice(ctx context.Context, deviceInstanceID string, index uint16) error { - key := VPCIDeviceKey{ - deviceInstanceID: deviceInstanceID, - virtualFunctionIndex: index, - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - vpci := uvm.vpciDevices[key] - if vpci == nil { - return fmt.Errorf("no device with ID %s and index %d is present on the uvm %s", deviceInstanceID, index, uvm.ID()) - } - - vpci.refCount-- - if vpci.refCount == 0 { - delete(uvm.vpciDevices, key) - return uvm.modify(ctx, &hcsschema.ModifySettingRequest{ - ResourcePath: fmt.Sprintf(resourcepaths.VirtualPCIResourceFormat, vpci.VMBusGUID), - RequestType: guestrequest.RequestTypeRemove, - }) - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go deleted file mode 100644 index cde51aa014..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go +++ /dev/null @@ -1,264 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "os" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/ext4/dmverity" - "github.com/Microsoft/hcsshim/ext4/tar2ext4" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -const ( - lcowDefaultVPMemLayerFmt = "/run/layers/p%d" -) - -var ( - // ErrMaxVPMemLayerSize is the error returned when the size of `hostPath` is - // greater than the max vPMem layer size set at create time. - ErrMaxVPMemLayerSize = errors.New("layer size is to large for VPMEM max size") -) - -type vPMemInfoDefault struct { - hostPath string - uvmPath string - refCount uint32 -} - -func newDefaultVPMemInfo(hostPath, uvmPath string) *vPMemInfoDefault { - return &vPMemInfoDefault{ - hostPath: hostPath, - uvmPath: uvmPath, - refCount: 1, - } -} - -// fileSystemSize retrieves ext4 fs SuperBlock and returns the file system size and block size -func fileSystemSize(vhdPath string) (int64, int, error) { - sb, err := tar2ext4.ReadExt4SuperBlock(vhdPath) - if err != nil { - return 0, 0, errors.Wrap(err, "failed to read ext4 super block") - } - blockSize := 1024 * (1 << sb.LogBlockSize) - fsSize := int64(blockSize) * int64(sb.BlocksCountLow) - return fsSize, blockSize, nil -} - -// readVeritySuperBlock reads ext4 super block for a given VHD to then further read the dm-verity super block -// and root hash -func readVeritySuperBlock(ctx context.Context, layerPath string) (*guestresource.DeviceVerityInfo, error) { - // dm-verity information is expected to be appended, the size of ext4 data will be the offset - // of the dm-verity super block, followed by merkle hash tree - ext4SizeInBytes, ext4BlockSize, err := fileSystemSize(layerPath) - if err != nil { - return nil, err - } - - dmvsb, err := dmverity.ReadDMVerityInfo(layerPath, ext4SizeInBytes) - if err != nil { - return nil, errors.Wrap(err, "failed to read dm-verity super block") - } - log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, - "rootHash": dmvsb.RootDigest, - "algorithm": dmvsb.Algorithm, - "salt": dmvsb.Salt, - "dataBlocks": dmvsb.DataBlocks, - "dataBlockSize": dmvsb.DataBlockSize, - }).Debug("dm-verity information") - - return &guestresource.DeviceVerityInfo{ - Ext4SizeInBytes: ext4SizeInBytes, - BlockSize: 
ext4BlockSize, - RootDigest: dmvsb.RootDigest, - Algorithm: dmvsb.Algorithm, - Salt: dmvsb.Salt, - Version: int(dmvsb.Version), - SuperBlock: true, - }, nil -} - -// findNextVPMemSlot finds next available VPMem slot. -// -// Lock MUST be held when calling this function. -func (uvm *UtilityVM) findNextVPMemSlot(ctx context.Context, hostPath string) (uint32, error) { - for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if uvm.vpmemDevicesDefault[i] == nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": hostPath, - "deviceNumber": i, - }).Debug("allocated VPMem location") - return i, nil - } - } - return 0, ErrNoAvailableLocation -} - -// findVPMemSlot looks up `findThisHostPath` in already mounted VPMem devices -// -// Lock MUST be held when calling this function -func (uvm *UtilityVM) findVPMemSlot(ctx context.Context, findThisHostPath string) (uint32, error) { - for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if vi := uvm.vpmemDevicesDefault[i]; vi != nil && vi.hostPath == findThisHostPath { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": vi.hostPath, - "uvmPath": vi.uvmPath, - "refCount": vi.refCount, - "deviceNumber": i, - }).Debug("found VPMem location") - return i, nil - } - } - return 0, ErrNotAttached -} - -// addVPMemDefault adds a VPMem disk to a utility VM at the next available location and -// returns the UVM path where the layer was mounted. -func (uvm *UtilityVM) addVPMemDefault(ctx context.Context, hostPath string) (_ string, err error) { - if devNumber, err := uvm.findVPMemSlot(ctx, hostPath); err == nil { - device := uvm.vpmemDevicesDefault[devNumber] - device.refCount++ - return device.uvmPath, nil - } - - fi, err := os.Stat(hostPath) - if err != nil { - return "", err - } - if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes { - return "", ErrMaxVPMemLayerSize - } - - deviceNumber, err := uvm.findNextVPMemSlot(ctx, hostPath) - if err != nil { - return "", err - } - - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - Settings: hcsschema.VirtualPMemDevice{ - HostPath: hostPath, - ReadOnly: true, - ImageFormat: "Vhd1", - }, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - } - - uvmPath := fmt.Sprintf(lcowDefaultVPMemLayerFmt, deviceNumber) - guestSettings := guestresource.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: uvmPath, - } - if v, iErr := readVeritySuperBlock(ctx, hostPath); iErr != nil { - log.G(ctx).WithError(iErr).WithField("hostPath", hostPath).Debug("unable to read dm-verity information from VHD") - } else { - if v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": hostPath, - "rootDigest": v.RootDigest, - }).Debug("adding VPMem with dm-verity") - } - guestSettings.VerityInfo = v - } - - modification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeVPMemDevice, - RequestType: guestrequest.RequestTypeAdd, - Settings: guestSettings, - } - - if err := uvm.modify(ctx, modification); err != nil { - return "", errors.Errorf("uvm::addVPMemDefault: failed to modify utility VM configuration: %s", err) - } - - uvm.vpmemDevicesDefault[deviceNumber] = newDefaultVPMemInfo(hostPath, uvmPath) - return uvmPath, nil -} - -// removeVPMemDefault removes a VPMem disk from a Utility VM. If the `hostPath` is not -// attached returns `ErrNotAttached`. 
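fileSystemSize above derives the filesystem size from the ext4 superblock (block size is 1024 shifted left by LogBlockSize, multiplied by the block count), and readVeritySuperBlock uses that size as the offset where the appended dm-verity superblock begins. A small worked example of that arithmetic follows; the superblock values plugged in are hypothetical.

package main

import "fmt"

// ext4Size mirrors the fileSystemSize arithmetic: ext4 stores the block size
// as a log2 shift relative to 1024, so LogBlockSize == 2 means 4096-byte blocks.
// The resulting filesystem size is also the offset at which an appended
// dm-verity superblock would start.
func ext4Size(logBlockSize uint32, blocksCountLow uint32) (blockSize int, fsSize int64) {
	blockSize = 1024 * (1 << logBlockSize)
	fsSize = int64(blockSize) * int64(blocksCountLow)
	return blockSize, fsSize
}

func main() {
	blockSize, fsSize := ext4Size(2, 25600) // hypothetical superblock values
	fmt.Println(blockSize, fsSize)          // 4096 104857600 (a 100 MiB layer)
}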
-func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) error { - deviceNumber, err := uvm.findVPMemSlot(ctx, hostPath) - if err != nil { - return err - } - - device := uvm.vpmemDevicesDefault[deviceNumber] - if device.refCount > 1 { - device.refCount-- - return nil - } - - var verity *guestresource.DeviceVerityInfo - if v, _ := readVeritySuperBlock(ctx, hostPath); v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": hostPath, - "rootDigest": v.RootDigest, - }).Debug("removing VPMem with dm-verity") - verity = v - } - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeVPMemDevice, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: device.uvmPath, - VerityInfo: verity, - }, - }, - } - if err := uvm.modify(ctx, modification); err != nil { - return errors.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) - } - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": device.hostPath, - "uvmPath": device.uvmPath, - "refCount": device.refCount, - "deviceNumber": deviceNumber, - }).Debug("removed VPMEM location") - - uvm.vpmemDevicesDefault[deviceNumber] = nil - - return nil -} - -func (uvm *UtilityVM) AddVPMem(ctx context.Context, hostPath string) (string, error) { - if uvm.operatingSystem != "linux" { - return "", errNotSupported - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - if uvm.vpmemMultiMapping { - return uvm.addVPMemMappedDevice(ctx, hostPath) - } - return uvm.addVPMemDefault(ctx, hostPath) -} - -func (uvm *UtilityVM) RemoveVPMem(ctx context.Context, hostPath string) error { - if uvm.operatingSystem != "linux" { - return errNotSupported - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - if uvm.vpmemMultiMapping { - return uvm.removeVPMemMappedDevice(ctx, hostPath) - } - return uvm.removeVPMemDefault(ctx, hostPath) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go deleted file mode 100644 index 510e9ed4c4..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go +++ /dev/null @@ -1,321 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - "os" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/memory" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" -) - -const ( - PageSize = 0x1000 - MaxMappedDeviceCount = 1024 -) - -const lcowPackedVPMemLayerFmt = "/run/layers/p%d-%d-%d" - -type mappedDeviceInfo struct { - vPMemInfoDefault - mappedRegion memory.MappedRegion - sizeInBytes uint64 -} - -type vPMemInfoMulti struct { - memory.PoolAllocator - maxSize uint64 - maxMappedDeviceCount uint32 - mappings map[string]*mappedDeviceInfo -} - -func newVPMemMappedDevice(hostPath, uvmPath string, sizeBytes uint64, memReg memory.MappedRegion) *mappedDeviceInfo { - return &mappedDeviceInfo{ - vPMemInfoDefault: vPMemInfoDefault{ - hostPath: hostPath, - uvmPath: uvmPath, - refCount: 1, - }, - 
mappedRegion: memReg, - sizeInBytes: sizeBytes, - } -} - -func newPackedVPMemDevice() *vPMemInfoMulti { - return &vPMemInfoMulti{ - PoolAllocator: memory.NewPoolMemoryAllocator(), - maxSize: DefaultVPMemSizeBytes, - mappings: make(map[string]*mappedDeviceInfo), - maxMappedDeviceCount: MaxMappedDeviceCount, - } -} - -func pageAlign(t uint64) uint64 { - if t%PageSize == 0 { - return t - } - return (t/PageSize + 1) * PageSize -} - -// newMappedVPMemModifyRequest creates an hcsschema.ModifySettingsRequest to modify VPMem devices/mappings -// for the multi-mapping setup -func newMappedVPMemModifyRequest( - ctx context.Context, - rType guestrequest.RequestType, - deviceNumber uint32, - md *mappedDeviceInfo, - uvm *UtilityVM, -) (*hcsschema.ModifySettingRequest, error) { - guestSettings := guestresource.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: md.uvmPath, - MappingInfo: &guestresource.LCOWVPMemMappingInfo{ - DeviceOffsetInBytes: md.mappedRegion.Offset(), - DeviceSizeInBytes: md.sizeInBytes, - }, - } - - if verity, err := readVeritySuperBlock(ctx, md.hostPath); err != nil { - log.G(ctx).WithError(err).WithField("hostPath", md.hostPath).Debug("unable to read dm-verity information from VHD") - } else { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": md.hostPath, - "rootDigest": verity.RootDigest, - }).Debug("adding multi-mapped VPMem with dm-verity") - guestSettings.VerityInfo = verity - } - - request := &hcsschema.ModifySettingRequest{ - RequestType: rType, - GuestRequest: guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeVPMemDevice, - RequestType: rType, - Settings: guestSettings, - }, - } - - pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] - switch rType { - case guestrequest.RequestTypeAdd: - if pmem == nil { - request.Settings = hcsschema.VirtualPMemDevice{ - ReadOnly: true, - HostPath: md.hostPath, - ImageFormat: "Vhd1", - } - request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) - } else { - request.Settings = hcsschema.VirtualPMemMapping{ - HostPath: md.hostPath, - ImageFormat: "Vhd1", - } - request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) - } - case guestrequest.RequestTypeRemove: - if pmem == nil { - return nil, errors.Errorf("no device found at location %d", deviceNumber) - } - if len(pmem.mappings) == 1 { - request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) - } else { - request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) - } - default: - return nil, errors.New("unsupported request type") - } - - log.G(ctx).WithFields(logrus.Fields{ - "deviceNumber": deviceNumber, - "hostPath": md.hostPath, - "uvmPath": md.uvmPath, - }).Debugf("new mapped VPMem modify request: %v", request) - return request, nil -} - -// mapVHDLayer adds `device` to mappings -func (pmem *vPMemInfoMulti) mapVHDLayer(ctx context.Context, device *mappedDeviceInfo) (err error) { - if md, ok := pmem.mappings[device.hostPath]; ok { - md.refCount++ - return nil - } - - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": device.hostPath, - "mountPath": device.uvmPath, - "deviceOffset": device.mappedRegion.Offset(), - "deviceSize": device.sizeInBytes, - }).Debug("mapped new device") - - pmem.mappings[device.hostPath] = device - return nil -} - -// unmapVHDLayer removes mapped device with `hostPath` from mappings and releases allocated memory -func (pmem 
*vPMemInfoMulti) unmapVHDLayer(ctx context.Context, hostPath string) (err error) { - dev, ok := pmem.mappings[hostPath] - if !ok { - return ErrNotAttached - } - - if dev.refCount > 1 { - dev.refCount-- - return nil - } - - if err := pmem.Release(dev.mappedRegion); err != nil { - return err - } - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": dev.hostPath, - }).Debugf("Done releasing resources: %s", dev.hostPath) - delete(pmem.mappings, hostPath) - return nil -} - -// findVPMemMappedDevice finds a VHD device that's been mapped on VPMem surface -func (uvm *UtilityVM) findVPMemMappedDevice(ctx context.Context, hostPath string) (uint32, *mappedDeviceInfo, error) { - for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - vi := uvm.vpmemDevicesMultiMapped[i] - if vi != nil { - if vhd, ok := vi.mappings[hostPath]; ok { - log.G(ctx).WithFields(logrus.Fields{ - "deviceNumber": i, - "hostPath": hostPath, - "uvmPath": vhd.uvmPath, - "refCount": vhd.refCount, - "deviceSize": vhd.sizeInBytes, - "deviceOffset": vhd.mappedRegion.Offset(), - }).Debug("found mapped VHD") - return i, vhd, nil - } - } - } - return 0, nil, ErrNotAttached -} - -// allocateNextVPMemMappedDeviceLocation allocates a memory region with a minimum offset on the VPMem surface, -// where the device with a given `devSize` can be mapped. -func (uvm *UtilityVM) allocateNextVPMemMappedDeviceLocation(ctx context.Context, devSize uint64) (uint32, memory.MappedRegion, error) { - // device size has to be page aligned - devSize = pageAlign(devSize) - - for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - pmem := uvm.vpmemDevicesMultiMapped[i] - if pmem == nil { - pmem = newPackedVPMemDevice() - uvm.vpmemDevicesMultiMapped[i] = pmem - } - - if len(pmem.mappings) >= int(pmem.maxMappedDeviceCount) { - continue - } - - reg, err := pmem.Allocate(devSize) - if err != nil { - continue - } - log.G(ctx).WithFields(logrus.Fields{ - "deviceNumber": i, - "deviceOffset": reg.Offset(), - "deviceSize": devSize, - }).Debug("found offset for mapped VHD on an existing VPMem device") - return i, reg, nil - } - return 0, nil, ErrNoAvailableLocation -} - -// addVPMemMappedDevice adds container layer as a mapped device, first mapped device is added as a regular -// VPMem device, but subsequent additions will call into mapping APIs -// -// Lock MUST be held when calling this function -func (uvm *UtilityVM) addVPMemMappedDevice(ctx context.Context, hostPath string) (_ string, err error) { - if _, dev, err := uvm.findVPMemMappedDevice(ctx, hostPath); err == nil { - dev.refCount++ - return dev.uvmPath, nil - } - - st, err := os.Stat(hostPath) - if err != nil { - return "", err - } - // NOTE: On the guest side devSize is used to create a device mapper linear target, which is then used to create - // device mapper verity target. Since the dm-verity hash device is appended after ext4 data, we need the full size - // on disk (minus VHD footer), otherwise the resulting linear target will have hash device truncated and verity - // target creation will fail as a result. 
- devSize := pageAlign(uint64(st.Size())) - deviceNumber, memReg, err := uvm.allocateNextVPMemMappedDeviceLocation(ctx, devSize) - if err != nil { - return "", err - } - defer func() { - if err != nil { - pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] - if err := pmem.Release(memReg); err != nil { - log.G(ctx).WithError(err).Debugf("failed to reclaim pmem region: %s", err) - } - } - }() - - uvmPath := fmt.Sprintf(lcowPackedVPMemLayerFmt, deviceNumber, memReg.Offset(), devSize) - md := newVPMemMappedDevice(hostPath, uvmPath, devSize, memReg) - modification, err := newMappedVPMemModifyRequest(ctx, guestrequest.RequestTypeAdd, deviceNumber, md, uvm) - if err := uvm.modify(ctx, modification); err != nil { - return "", errors.Errorf("uvm::addVPMemMappedDevice: failed to modify utility VM configuration: %s", err) - } - defer func() { - if err != nil { - rmRequest, _ := newMappedVPMemModifyRequest(ctx, guestrequest.RequestTypeRemove, deviceNumber, md, uvm) - if err := uvm.modify(ctx, rmRequest); err != nil { - log.G(ctx).WithError(err).Debugf("failed to rollback modification") - } - } - }() - - pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] - if err := pmem.mapVHDLayer(ctx, md); err != nil { - return "", errors.Wrapf(err, "failed to update internal state") - } - return uvmPath, nil -} - -// removeVPMemMappedDevice removes a mapped container layer, if the layer is the last to be removed, removes -// VPMem device instead -// -// Lock MUST be held when calling this function -func (uvm *UtilityVM) removeVPMemMappedDevice(ctx context.Context, hostPath string) error { - devNum, md, err := uvm.findVPMemMappedDevice(ctx, hostPath) - if err != nil { - return err - } - if md.refCount > 1 { - md.refCount-- - return nil - } - - modification, err := newMappedVPMemModifyRequest(ctx, guestrequest.RequestTypeRemove, devNum, md, uvm) - if err != nil { - return err - } - - if err := uvm.modify(ctx, modification); err != nil { - return errors.Errorf("failed to remove packed VPMem %s from UVM %s: %s", md.hostPath, uvm.id, err) - } - - pmem := uvm.vpmemDevicesMultiMapped[devNum] - if err := pmem.unmapVHDLayer(ctx, hostPath); err != nil { - log.G(ctx).WithError(err).Debugf("failed unmapping VHD layer %s", hostPath) - } - if len(pmem.mappings) == 0 { - uvm.vpmemDevicesMultiMapped[devNum] = nil - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go deleted file mode 100644 index 348058c7ef..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go +++ /dev/null @@ -1,435 +0,0 @@ -//go:build windows - -package uvm - -import ( - "bytes" - "context" - "encoding/gob" - "fmt" - "os" - "path/filepath" - "strconv" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/Microsoft/hcsshim/osversion" -) - -const ( - vsmbSharePrefix = `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` - vsmbCurrentSerialVersionID uint32 = 1 -) - -// VSMBShare contains the host path for a Vsmb Mount -type VSMBShare struct { - // UVM the resource belongs to - vm *UtilityVM - HostPath string - refCount uint32 - name string - allowedFiles []string - guestPath 
string - options hcsschema.VirtualSmbShareOptions - serialVersionID uint32 -} - -// Release frees the resources of the corresponding vsmb Mount -func (vsmb *VSMBShare) Release(ctx context.Context) error { - if err := vsmb.vm.RemoveVSMB(ctx, vsmb.HostPath, vsmb.options.ReadOnly); err != nil { - return fmt.Errorf("failed to remove VSMB share: %s", err) - } - return nil -} - -// DefaultVSMBOptions returns the default VSMB options. If readOnly is specified, -// returns the default VSMB options for a readonly share. -func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *hcsschema.VirtualSmbShareOptions { - opts := &hcsschema.VirtualSmbShareOptions{ - NoDirectmap: uvm.DevicesPhysicallyBacked() || uvm.VSMBNoDirectMap(), - } - if readOnly { - opts.ShareRead = true - opts.CacheIo = true - opts.ReadOnly = true - opts.PseudoOplocks = true - } - return opts -} - -func (uvm *UtilityVM) SetSaveableVSMBOptions(opts *hcsschema.VirtualSmbShareOptions, readOnly bool) { - if readOnly { - opts.ShareRead = true - opts.CacheIo = true - opts.ReadOnly = true - opts.PseudoOplocks = true - opts.NoOplocks = false - } else { - // Using NoOpLocks can cause intermittent Access denied failures due to - // a VSMB bug that was fixed but not backported to RS5/19H1. - opts.ShareRead = false - opts.CacheIo = false - opts.ReadOnly = false - opts.PseudoOplocks = false - opts.NoOplocks = true - } - opts.NoLocks = true - opts.PseudoDirnotify = true - opts.NoDirectmap = true -} - -// findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`. -func (uvm *UtilityVM) findVSMBShare(ctx context.Context, m map[string]*VSMBShare, shareKey string) (*VSMBShare, error) { - share, ok := m[shareKey] - if !ok { - return nil, ErrNotAttached - } - return share, nil -} - -// openHostPath opens the given path and returns the handle. The handle is opened with -// full sharing and no access mask. The directory must already exist. This -// function is intended to return a handle suitable for use with GetFileInformationByHandleEx. -// -// We are not able to use builtin Go functionality for opening a directory path: -// - os.Open on a directory returns a os.File where Fd() is a search handle from FindFirstFile. -// - syscall.Open does not provide a way to specify FILE_FLAG_BACKUP_SEMANTICS, which is needed to -// open a directory. -// We could use os.Open if the path is a file, but it's easier to just use the same code for both. -// Therefore, we call windows.CreateFile directly. -func openHostPath(path string) (windows.Handle, error) { - u16, err := windows.UTF16PtrFromString(path) - if err != nil { - return 0, err - } - h, err := windows.CreateFile( - u16, - 0, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, - windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS, - 0) - if err != nil { - return 0, &os.PathError{ - Op: "CreateFile", - Path: path, - Err: err, - } - } - return h, nil -} - -// In 19H1, a change was made to VSMB to require querying file ID for the files being shared in -// order to support direct map. This change was made to ensure correctness in cases where direct -// map is used with saving/restoring VMs. -// -// However, certain file systems (such as Azure Files SMB shares) don't support the FileIdInfo -// query that is used. Azure Files in particular fails with ERROR_INVALID_PARAMETER. This issue -// affects at least 19H1, 19H2, 20H1, and 20H2. -// -// To work around this, we attempt to query for FileIdInfo ourselves if on an affected build. 
If -// the query fails, we override the specified options to force no direct map to be used. -func forceNoDirectMap(path string) (bool, error) { - if ver := osversion.Build(); ver < osversion.V19H1 || ver > osversion.V20H2 { - return false, nil - } - h, err := openHostPath(path) - if err != nil { - return false, err - } - defer func() { - _ = windows.CloseHandle(h) - }() - var info winapi.FILE_ID_INFO - // We check for any error, rather than just ERROR_INVALID_PARAMETER. It seems better to also - // fall back if e.g. some other backing filesystem is used which returns a different error. - if err := windows.GetFileInformationByHandleEx(h, winapi.FileIdInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))); err != nil { - return true, nil - } - return false, nil -} - -// AddVSMB adds a VSMB share to a Windows utility VM. Each VSMB share is ref-counted and -// only added if it isn't already. This is used for read-only layers, mapped directories -// to a container, and for mapped pipes. -func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcsschema.VirtualSmbShareOptions) (*VSMBShare, error) { - if uvm.operatingSystem != "windows" { - return nil, errNotSupported - } - - if !options.ReadOnly && uvm.NoWritableFileShares() { - return nil, fmt.Errorf("adding writable shares is denied: %w", hcs.ErrOperationDenied) - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - // Temporary support to allow single-file mapping. If `hostPath` is a - // directory, map it without restriction. However, if it is a file, map the - // directory containing the file, and use `AllowedFileList` to only allow - // access to that file. If the directory has been mapped before for - // single-file use, add the new file to the `AllowedFileList` and issue an - // Update operation. - st, err := os.Stat(hostPath) - if err != nil { - return nil, err - } - var file string - m := uvm.vsmbDirShares - if !st.IsDir() { - m = uvm.vsmbFileShares - file = hostPath - hostPath = filepath.Dir(hostPath) - options.RestrictFileAccess = true - options.SingleFileMapping = true - } - hostPath = filepath.Clean(hostPath) - - if force, err := forceNoDirectMap(hostPath); err != nil { - return nil, err - } else if force { - log.G(ctx).WithField("path", hostPath).Info("Forcing NoDirectmap for VSMB mount") - options.NoDirectmap = true - } - - var requestType = guestrequest.RequestTypeUpdate - shareKey := getVSMBShareKey(hostPath, options.ReadOnly) - share, err := uvm.findVSMBShare(ctx, m, shareKey) - if err == ErrNotAttached { - requestType = guestrequest.RequestTypeAdd - uvm.vsmbCounter++ - shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16) - - share = &VSMBShare{ - vm: uvm, - name: shareName, - guestPath: vsmbSharePrefix + shareName, - HostPath: hostPath, - serialVersionID: vsmbCurrentSerialVersionID, - } - } - newAllowedFiles := share.allowedFiles - if options.RestrictFileAccess { - newAllowedFiles = append(newAllowedFiles, file) - } - - // Update on a VSMB share currently only supports updating the - // AllowedFileList, and in fact will return an error if RestrictFileAccess - // isn't set (e.g. if used on an unrestricted share). So we only call Modify - // if we are either doing an Add, or if RestrictFileAccess is set. 
- if requestType == guestrequest.RequestTypeAdd || options.RestrictFileAccess { - log.G(ctx).WithFields(logrus.Fields{ - "name": share.name, - "path": hostPath, - "options": fmt.Sprintf("%+#v", options), - "operation": requestType, - }).Info("Modifying VSMB share") - modification := &hcsschema.ModifySettingRequest{ - RequestType: requestType, - Settings: hcsschema.VirtualSmbShare{ - Name: share.name, - Options: options, - Path: hostPath, - AllowedFiles: newAllowedFiles, - }, - ResourcePath: resourcepaths.VSMBShareResourcePath, - } - if err := uvm.modify(ctx, modification); err != nil { - return nil, err - } - } - - share.allowedFiles = newAllowedFiles - share.refCount++ - share.options = *options - m[shareKey] = share - return share, nil -} - -// RemoveVSMB removes a VSMB share from a utility VM. Each VSMB share is ref-counted -// and only actually removed when the ref-count drops to zero. -func (uvm *UtilityVM) RemoveVSMB(ctx context.Context, hostPath string, readOnly bool) error { - if uvm.operatingSystem != "windows" { - return errNotSupported - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - st, err := os.Stat(hostPath) - if err != nil { - return err - } - m := uvm.vsmbDirShares - if !st.IsDir() { - m = uvm.vsmbFileShares - hostPath = filepath.Dir(hostPath) - } - hostPath = filepath.Clean(hostPath) - shareKey := getVSMBShareKey(hostPath, readOnly) - share, err := uvm.findVSMBShare(ctx, m, shareKey) - if err != nil { - return fmt.Errorf("%s is not present as a VSMB share in %s, cannot remove", hostPath, uvm.id) - } - - share.refCount-- - if share.refCount > 0 { - return nil - } - - modification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - Settings: hcsschema.VirtualSmbShare{Name: share.name}, - ResourcePath: resourcepaths.VSMBShareResourcePath, - } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove vsmb share %s from %s: %+v: %s", hostPath, uvm.id, modification, err) - } - - delete(m, shareKey) - return nil -} - -// GetVSMBUvmPath returns the guest path of a VSMB mount. -func (uvm *UtilityVM) GetVSMBUvmPath(ctx context.Context, hostPath string, readOnly bool) (string, error) { - if hostPath == "" { - return "", fmt.Errorf("no hostPath passed to GetVSMBUvmPath") - } - - uvm.m.Lock() - defer uvm.m.Unlock() - - st, err := os.Stat(hostPath) - if err != nil { - return "", err - } - m := uvm.vsmbDirShares - f := "" - if !st.IsDir() { - m = uvm.vsmbFileShares - hostPath, f = filepath.Split(hostPath) - } - hostPath = filepath.Clean(hostPath) - shareKey := getVSMBShareKey(hostPath, readOnly) - share, err := uvm.findVSMBShare(ctx, m, shareKey) - if err != nil { - return "", err - } - return filepath.Join(share.guestPath, f), nil -} - -var _ = (Cloneable)(&VSMBShare{}) - -// GobEncode serializes the VSMBShare struct -func (vsmb *VSMBShare) GobEncode() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - errMsgFmt := "failed to encode VSMBShare: %s" - // encode only the fields that can be safely deserialized. - // Always use vsmbCurrentSerialVersionID as vsmb.serialVersionID might not have - // been initialized. 
- if err := encoder.Encode(vsmbCurrentSerialVersionID); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.HostPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.name); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.allowedFiles); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.guestPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.options); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - return buf.Bytes(), nil -} - -// GobDecode deserializes the VSMBShare struct into the struct on which this is called -// (i.e the vsmb pointer) -func (vsmb *VSMBShare) GobDecode(data []byte) error { - buf := bytes.NewBuffer(data) - decoder := gob.NewDecoder(buf) - errMsgFmt := "failed to decode VSMBShare: %s" - // fields should be decoded in the same order in which they were encoded. - // And verify the serialVersionID first - if err := decoder.Decode(&vsmb.serialVersionID); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if vsmb.serialVersionID != vsmbCurrentSerialVersionID { - return fmt.Errorf("serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID) - } - if err := decoder.Decode(&vsmb.HostPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.name); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.allowedFiles); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.guestPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.options); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - return nil -} - -// Clone creates a clone of the VSMBShare `vsmb` and adds that clone to the uvm `vm`. To -// clone VSMB share we just need to add it into the config doc of that VM and increase the -// vsmb counter. -func (vsmb *VSMBShare) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { - cd.doc.VirtualMachine.Devices.VirtualSmb.Shares = append(cd.doc.VirtualMachine.Devices.VirtualSmb.Shares, hcsschema.VirtualSmbShare{ - Name: vsmb.name, - Path: vsmb.HostPath, - Options: &vsmb.options, - AllowedFiles: vsmb.allowedFiles, - }) - vm.vsmbCounter++ - - clonedVSMB := &VSMBShare{ - vm: vm, - HostPath: vsmb.HostPath, - refCount: 1, - name: vsmb.name, - options: vsmb.options, - allowedFiles: vsmb.allowedFiles, - guestPath: vsmb.guestPath, - serialVersionID: vsmbCurrentSerialVersionID, - } - shareKey := getVSMBShareKey(vsmb.HostPath, vsmb.options.ReadOnly) - if vsmb.options.RestrictFileAccess { - vm.vsmbFileShares[shareKey] = clonedVSMB - } else { - vm.vsmbDirShares[shareKey] = clonedVSMB - } - - return nil -} - -// getVSMBShareKey returns a string key which encapsulates the information that is used to -// look up an existing VSMB share. If a share is being added, but there is an existing -// share with the same key, the existing share will be used instead (and its ref count -// incremented). 
-func getVSMBShareKey(hostPath string, readOnly bool) string { - return fmt.Sprintf("%v-%v", hostPath, readOnly) -} - -func (vsmb *VSMBShare) GetSerialVersionID() uint32 { - return vsmbCurrentSerialVersionID -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go deleted file mode 100644 index d052be533a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build windows - -package uvm - -import ( - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/sirupsen/logrus" -) - -// Wait waits synchronously for a utility VM to terminate. -func (uvm *UtilityVM) Wait() error { - err := uvm.hcsSystem.Wait() - - logrus.WithField(logfields.UVMID, uvm.id).Debug("uvm exited, waiting for output processing to complete") - if uvm.outputProcessingDone != nil { - <-uvm.outputProcessingDone - } - - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/doc.go deleted file mode 100644 index 9e2b205da0..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/doc.go +++ /dev/null @@ -1 +0,0 @@ -package uvmfolder diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go deleted file mode 100644 index 9dd00c8128..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go +++ /dev/null @@ -1 +0,0 @@ -package vmcompute diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go deleted file mode 100644 index 3622f3bbee..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ /dev/null @@ -1,618 +0,0 @@ -//go:build windows - -package vmcompute - -import ( - gcontext "context" - "syscall" - "time" - - "go.opencensus.io/trace" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/timeout" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go - -//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? -//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? -//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? -//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? -//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? -//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? -//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? 
-//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? -//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? -//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? -//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? -//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? -//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? - -//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? -//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? -//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? -//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? -//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? -//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? -//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? -//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? -//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? -//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? - -// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously -const errVmcomputeOperationPending = syscall.Errno(0xC0370103) - -// HcsSystem is the handle associated with a created compute system. -type HcsSystem syscall.Handle - -// HcsProcess is the handle associated with a created process in a compute -// system. -type HcsProcess syscall.Handle - -// HcsCallback is the handle associated with the function to call when events -// occur. -type HcsCallback syscall.Handle - -// HcsProcessInformation is the structure used when creating or getting process -// info. -type HcsProcessInformation struct { - // ProcessId is the pid of the created process. - ProcessId uint32 - reserved uint32 //nolint:structcheck - // StdInput is the handle associated with the stdin of the process. - StdInput syscall.Handle - // StdOutput is the handle associated with the stdout of the process. - StdOutput syscall.Handle - // StdError is the handle associated with the stderr of the process. 
- StdError syscall.Handle -} - -func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { - if timeout > 0 { - var cancel gcontext.CancelFunc - ctx, cancel = gcontext.WithTimeout(ctx, timeout) - defer cancel() - } - - done := make(chan error, 1) - go func() { - done <- f() - }() - select { - case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { - log.G(ctx).WithField(logfields.Timeout, timeout). - Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") - } - return ctx.Err() - case err := <-done: - return err - } -} - -func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsEnumerateComputeSystems") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("query", query)) - - return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - computeSystemsp *uint16 - resultp *uint16 - ) - err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) - if computeSystemsp != nil { - computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsCreateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes( - trace.StringAttribute("id", id), - trace.StringAttribute("configuration", configuration)) - - return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { - var resultp *uint16 - err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsOpenComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenComputeSystem(id, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { - ctx, span := oc.StartSpan(ctx, "HcsCloseComputeSystem") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseComputeSystem(computeSystem) - }) -} - -func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsStartComputeSystem") - defer span.End() - defer 
func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemStart, func() error { - var resultp *uint16 - err := hcsStartComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsShutdownComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsShutdownComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsTerminateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsPauseComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemPause, func() error { - var resultp *uint16 - err := hcsPauseComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsResumeComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemResume, func() error { - var resultp *uint16 - err := hcsResumeComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { - ctx, span := oc.StartSpan(ctx, 
"HcsGetComputeSystemProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsModifyComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("configuration", configuration)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsModifyServiceSettings") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyServiceSettings(settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsRegisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := oc.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterComputeSystemCallback(callbackHandle) - }) -} - -func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsCreateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - if span.IsRecordingEvents() { - // wont handle v1 process parameters - if s, err := log.ScrubProcessParameters(processParameters); err == nil { - span.AddAttributes(trace.StringAttribute("processParameters", s)) - } - } - - return processInformation, process, result, 
execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsOpenProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) - - return process, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenProcess(computeSystem, pid, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { - ctx, span := oc.StartSpan(ctx, "HcsCloseProcess") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseProcess(process) - }) -} - -func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsTerminateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateProcess(process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsSignalProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSignalProcess(process, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsGetProcessInfo") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsGetProcessInfo(process, &processInformation, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsGetProcessProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - processPropertiesp *uint16 - 
resultp *uint16 - ) - err := hcsGetProcessProperties(process, &processPropertiesp, &resultp) - if processPropertiesp != nil { - processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsModifyProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyProcess(process, settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsGetServiceProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsRegisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := oc.StartSpan(ctx, "HcsUnregisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterProcessCallback(callbackHandle) - }) -} - -func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := oc.StartSpan(ctx, "HcsSaveComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSaveComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go deleted file mode 100644 index e12253c947..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ /dev/null @@ -1,29 +0,0 @@ 
-//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ActivateLayer will find the layer with the given id and mount it's filesystem. -// For a read/write layer, the mounted filesystem will appear as a volume on the -// host, while a read-only layer is generally expected to be a no-op. -// An activated layer must later be deactivated via DeactivateLayer. -func ActivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ActivateLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = activateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go deleted file mode 100644 index aea8b421ef..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ /dev/null @@ -1,183 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" - "go.opencensus.io/trace" -) - -type baseLayerWriter struct { - ctx context.Context - s *trace.Span - - root *os.File - f *os.File - bw *winio.BackupFileWriter - err error - hasUtilityVM bool - dirInfo []dirInfo -} - -type dirInfo struct { - path string - fileInfo winio.FileBasicInfo -} - -// reapplyDirectoryTimes reapplies directory modification, creation, etc. times -// after processing of the directory tree has completed. The times are expected -// to be ordered such that parent directories come before child directories. 
-func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { - for i := range dis { - di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) - if err != nil { - return err - } - - err = winio.SetFileBasicInfo(f, &di.fileInfo) - f.Close() - if err != nil { - return err - } - } - return nil -} - -func (w *baseLayerWriter) closeCurrentFile() error { - if w.f != nil { - err := w.bw.Close() - err2 := w.f.Close() - w.f = nil - w.bw = nil - if err != nil { - return err - } - if err2 != nil { - return err2 - } - } - return nil -} - -func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - if filepath.ToSlash(name) == `UtilityVM/Files` { - w.hasUtilityVM = true - } - - var f *os.File - defer func() { - if f != nil { - f.Close() - } - }() - - extraFlags := uint32(0) - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - extraFlags |= winapi.FILE_DIRECTORY_FILE - w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) - } - - mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) - if err != nil { - return hcserror.New(err, "Failed to safefile.OpenRelative", name) - } - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return hcserror.New(err, "Failed to SetFileBasicInfo", name) - } - - w.f = f - w.bw = winio.NewBackupFileWriter(f, true) - f = nil - return nil -} - -func (w *baseLayerWriter) AddLink(name string, target string) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - return safefile.LinkRelative(target, w.root, name, w.root) -} - -func (w *baseLayerWriter) Remove(name string) error { - return errors.New("base layer cannot have tombstones") -} - -func (w *baseLayerWriter) Write(b []byte) (int, error) { - n, err := w.bw.Write(b) - if err != nil { - w.err = err - } - return n, err -} - -func (w *baseLayerWriter) Close() (err error) { - defer w.s.End() - defer func() { oc.SetSpanStatus(w.s, err) }() - defer func() { - w.root.Close() - w.root = nil - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - if w.err == nil { - // Restore the file times of all the directories, since they may have - // been modified by creating child directories. 
- err = reapplyDirectoryTimes(w.root, w.dirInfo) - if err != nil { - return err - } - - err = ProcessBaseLayer(w.ctx, w.root.Name()) - if err != nil { - return err - } - - if w.hasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) - if err != nil { - return err - } - err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) - if err != nil { - return err - } - } - } - return w.err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go deleted file mode 100644 index 932475723a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateLayer creates a new, empty, read-only layer on the filesystem based on -// the parent layer provided. -func CreateLayer(ctx context.Context, path, parent string) (err error) { - title := "hcsshim::CreateLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parent", parent)) - - err = createLayer(&stdDriverInfo, path, parent) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go deleted file mode 100644 index 5c9d5d2507..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "strings" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateScratchLayer creates and populates new read-write layer for use by a container. -// This requires the full list of paths to all parent layers up to the base -func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::CreateScratchLayer" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = createSandboxLayer(&stdDriverInfo, path, 0, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go deleted file mode 100644 index e3bc77cbc8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DeactivateLayer will dismount a layer that was mounted via ActivateLayer. 
-func DeactivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DeactivateLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = deactivateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+"- failed", "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go deleted file mode 100644 index d0a59efe12..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DestroyLayer will remove the on-disk files representing the layer with the given -// path, including that layer's containing folder, if any. -func DestroyLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DestroyLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = destroyLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go deleted file mode 100644 index dd1d555804..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package wclayer provides bindings to HCS's legacy layer management API and -// provides a higher level interface around these calls for container layer -// management. -package wclayer diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go deleted file mode 100644 index e2ec27ad08..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "os" - "path/filepath" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "go.opencensus.io/trace" -) - -// ExpandScratchSize expands the size of a layer to at least size bytes. -func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { - title := "hcsshim::ExpandScratchSize" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.Int64Attribute("size", int64(size))) - - err = expandSandboxSize(&stdDriverInfo, path, size) - if err != nil { - return hcserror.New(err, title, "") - } - - // Manually expand the volume now in order to work around bugs in 19H1 and - // prerelease versions of Vb. Remove once this is fixed in Windows. 
- if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(ctx, path) - if err != nil { - return err - } - } - return nil -} - -type virtualStorageType struct { - DeviceID uint32 - VendorID [16]byte -} - -type openVersion2 struct { - GetInfoOnly int32 // bool but 4-byte aligned - ReadOnly int32 // bool but 4-byte aligned - ResiliencyGUID [16]byte // GUID -} - -type openVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 openVersion2 -} - -func attachVhd(path string) (syscall.Handle, error) { - var ( - defaultType virtualStorageType - handle syscall.Handle - ) - parameters := openVirtualDiskParameters{Version: 2} - err := openVirtualDisk( - &defaultType, - path, - 0, - 0, - ¶meters, - &handle) - if err != nil { - return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} - } - err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) - if err != nil { - syscall.Close(handle) - return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} - } - return handle, nil -} - -func expandSandboxVolume(ctx context.Context, path string) error { - // Mount the sandbox VHD temporarily. - vhdPath := filepath.Join(path, "sandbox.vhdx") - vhd, err := attachVhd(vhdPath) - if err != nil { - return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} - } - defer syscall.Close(vhd) - - // Open the volume. - volumePath, err := GetLayerMountPath(ctx, path) - if err != nil { - return err - } - if volumePath[len(volumePath)-1] == '\\' { - volumePath = volumePath[:len(volumePath)-1] - } - volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) - if err != nil { - return err - } - defer volume.Close() - - // Get the volume's underlying partition size in NTFS clusters. - var ( - partitionSize int64 - bytes uint32 - ) - const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) - if err != nil { - return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} - } - const ( - clusterSize = 4096 - sectorSize = 512 - ) - targetClusters := partitionSize / clusterSize - - // Get the volume's current size in NTFS clusters. - var volumeSize int64 - err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) - if err != nil { - return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} - } - volumeClusters := volumeSize / clusterSize - - // Only resize the volume if there is space to grow, otherwise this will - // fail with invalid parameter. NTFS reserves one cluster. 
- if volumeClusters+1 < targetClusters { - targetSectors := targetClusters * (clusterSize / sectorSize) - const _FSCTL_EXTEND_VOLUME = 0x000900F0 - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) - if err != nil { - return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} - } - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go deleted file mode 100644 index 08d6afd3b1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ExportLayer will create a folder at exportFolderPath and fill that folder with -// the transport format version of the layer identified by layerId. This transport -// format includes any metadata required for later importing the layer (using -// ImportLayer), and requires the full list of parent layer paths in order to -// perform the export. -func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ExportLayer" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("exportFolderPath", exportFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} - -// LayerReader is an interface that supports reading an existing container image layer. -type LayerReader interface { - // Next advances to the next file and returns the name, size, and file info - Next() (string, int64, *winio.FileBasicInfo, error) - // Read reads data from the current file, in the format of a Win32 backup stream, and - // returns the number of bytes read. - Read(b []byte) (int, error) - // Close finishes the layer reading process and releases any resources. - Close() error -} - -// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. -// The caller must have taken the SeBackupPrivilege privilege -// to call this and any methods on the resulting LayerReader. 
-func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { - ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerReader") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - exportPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - err = ExportLayer(ctx, path, exportPath, parentLayerPaths) - if err != nil { - os.RemoveAll(exportPath) - return nil, err - } - return &legacyLayerReaderWrapper{ - ctx: ctx, - s: span, - legacyLayerReader: newLegacyLayerReader(exportPath), - }, nil -} - -type legacyLayerReaderWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerReader -} - -func (r *legacyLayerReaderWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - - err = r.legacyLayerReader.Close() - os.RemoveAll(r.root) - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go deleted file mode 100644 index 715e06e379..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetLayerMountPath will look for a mounted layer with the given path and return -// the path at which that layer can be accessed. This path may be a volume path -// if the layer is a mounted read-write layer, otherwise it is expected to be the -// folder path at which the layer is stored. -func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { - title := "hcsshim::GetLayerMountPath" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - var mountPathLength uintptr = 0 - - // Call the procedure itself. - log.G(ctx).Debug("Calling proc (1)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) - if err != nil { - return "", hcserror.New(err, title, "(first call)") - } - - // Allocate a mount path of the returned length. 
- if mountPathLength == 0 { - return "", nil - } - mountPathp := make([]uint16, mountPathLength) - mountPathp[0] = 0 - - // Call the procedure again - log.G(ctx).Debug("Calling proc (2)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) - if err != nil { - return "", hcserror.New(err, title, "(second call)") - } - - mountPath := syscall.UTF16ToString(mountPathp[0:]) - span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) - return mountPath, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go deleted file mode 100644 index 5e400fb209..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetSharedBaseImages will enumerate the images stored in the common central -// image store and return descriptive info about those images for the purpose -// of registering them with the graphdriver, graph, and tagstore. -func GetSharedBaseImages(ctx context.Context) (_ string, err error) { - title := "hcsshim::GetSharedBaseImages" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var buffer *uint16 - err = getBaseImages(&buffer) - if err != nil { - return "", hcserror.New(err, title, "") - } - imageData := interop.ConvertAndFreeCoTaskMemString(buffer) - span.AddAttributes(trace.StringAttribute("imageData", imageData)) - return imageData, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go deleted file mode 100644 index 20217ed81b..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { - title := "hcsshim::GrantVmAccess" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("vm-id", vmid), - trace.StringAttribute("path", filepath)) - - err = grantVmAccess(vmid, filepath) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go deleted file mode 100644 index b9946c5f4a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ /dev/null @@ -1,168 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "go.opencensus.io/trace" 
-) - -// ImportLayer will take the contents of the folder at importFolderPath and import -// that into a layer with the id layerId. Note that in order to correctly populate -// the layer and interperet the transport format, all parent layers must already -// be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ImportLayer" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("importFolderPath", importFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = importLayer(&stdDriverInfo, path, importFolderPath, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} - -// LayerWriter is an interface that supports writing a new container image layer. -type LayerWriter interface { - // Add adds a file to the layer with given metadata. - Add(name string, fileInfo *winio.FileBasicInfo) error - // AddLink adds a hard link to the layer. The target must already have been added. - AddLink(name string, target string) error - // Remove removes a file that was present in a parent layer from the layer. - Remove(name string) error - // Write writes data to the current file. The data must be in the format of a Win32 - // backup stream. - Write(b []byte) (int, error) - // Close finishes the layer writing process and releases any resources. - Close() error -} - -type legacyLayerWriterWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerWriter - path string - parentLayerPaths []string -} - -func (r *legacyLayerWriterWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - defer os.RemoveAll(r.root.Name()) - defer r.legacyLayerWriter.CloseRoots() - - err = r.legacyLayerWriter.Close() - if err != nil { - return err - } - - if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { - return err - } - for _, name := range r.Tombstones { - if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - } - // Add any hard links that were collected. - for _, lnk := range r.PendingLinks { - if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { - return err - } - } - - // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone - // deletion and hard link creation. This is because Tombstone deletion and hard link - // creation updates the directory last write timestamps so that will change the - // timestamps added by the `Add` call. Some container applications depend on the - // correctness of these timestamps and so we should change the timestamps back to - // the original value (i.e the value provided in the Add call) after this - // processing is done. - err = reapplyDirectoryTimes(r.destRoot, r.changedDi) - if err != nil { - return err - } - - // Prepare the utility VM for use if one is present in the layer. 
- if r.HasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) - if err != nil { - return err - } - err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) - if err != nil { - return err - } - } - return nil -} - -// NewLayerWriter returns a new layer writer for creating a layer on disk. -// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges -// to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { - ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerWriter") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - if len(parentLayerPaths) == 0 { - // This is a base layer. It gets imported differently. - f, err := safefile.OpenRoot(path) - if err != nil { - return nil, err - } - return &baseLayerWriter{ - ctx: ctx, - s: span, - root: f, - }, nil - } - - importPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) - if err != nil { - return nil, err - } - return &legacyLayerWriterWrapper{ - ctx: ctx, - s: span, - legacyLayerWriter: w, - path: importPath, - parentLayerPaths: parentLayerPaths, - }, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go deleted file mode 100644 index 4d82977ea1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerExists will return true if a layer with the given id exists and is known -// to the system. -func LayerExists(ctx context.Context, path string) (_ bool, err error) { - title := "hcsshim::LayerExists" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - // Call the procedure itself. - var exists uint32 - err = layerExists(&stdDriverInfo, path, &exists) - if err != nil { - return false, hcserror.New(err, title, "") - } - span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) - return exists != 0, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go deleted file mode 100644 index d4805f1444..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerID returns the layer ID of a layer on disk. 
-func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { - title := "hcsshim::LayerID" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - _, file := filepath.Split(path) - return NameToGuid(ctx, file) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go deleted file mode 100644 index 86f0549ef6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ /dev/null @@ -1,99 +0,0 @@ -//go:build windows - -package wclayer - -// This file contains utility functions to support storage (graph) related -// functionality. - -import ( - "context" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/sirupsen/logrus" -) - -/* To pass into syscall, we need a struct matching the following: -enum GraphDriverType -{ - DiffDriver, - FilterDriver -}; - -struct DriverInfo { - GraphDriverType Flavour; - LPCWSTR HomeDir; -}; -*/ - -type driverInfo struct { - Flavour int - HomeDirp *uint16 -} - -var ( - utf16EmptyString uint16 - stdDriverInfo = driverInfo{1, &utf16EmptyString} -) - -/* To pass into syscall, we need a struct matching the following: -typedef struct _WC_LAYER_DESCRIPTOR { - - // - // The ID of the layer - // - - GUID LayerId; - - // - // Additional flags - // - - union { - struct { - ULONG Reserved : 31; - ULONG Dirty : 1; // Created from sandbox as a result of snapshot - }; - ULONG Value; - } Flags; - - // - // Path to the layer root directory, null-terminated - // - - PCWSTR Path; - -} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; -*/ -type WC_LAYER_DESCRIPTOR struct { - LayerId guid.GUID - Flags uint32 - Pathp *uint16 -} - -func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { - // Array of descriptors that gets constructed. 
- var layers []WC_LAYER_DESCRIPTOR - - for i := 0; i < len(parentLayerPaths); i++ { - g, err := LayerID(ctx, parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed to convert name to guid") - return nil, err - } - - p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") - return nil, err - } - - layers = append(layers, WC_LAYER_DESCRIPTOR{ - LayerId: g, - Flags: 0, - Pathp: p, - }) - } - - return layers, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go deleted file mode 100644 index 3e431877f8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ /dev/null @@ -1,812 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "bufio" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" -) - -var errorIterationCanceled = errors.New("") - -var mutatedUtilityVMFiles = map[string]bool{ - `EFI\Microsoft\Boot\BCD`: true, - `EFI\Microsoft\Boot\BCD.LOG`: true, - `EFI\Microsoft\Boot\BCD.LOG1`: true, - `EFI\Microsoft\Boot\BCD.LOG2`: true, -} - -const ( - filesPath = `Files` - hivesPath = `Hives` - utilityVMPath = `UtilityVM` - utilityVMFilesPath = `UtilityVM\Files` -) - -func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { - return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) -} - -func hasPathPrefix(p, prefix string) bool { - return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' -} - -type fileEntry struct { - path string - fi os.FileInfo - err error -} - -type legacyLayerReader struct { - root string - result chan *fileEntry - proceed chan bool - currentFile *os.File - backupReader *winio.BackupFileReader -} - -// newLegacyLayerReader returns a new LayerReader that can read the Windows -// container layer transport format from disk. -func newLegacyLayerReader(root string) *legacyLayerReader { - r := &legacyLayerReader{ - root: root, - result: make(chan *fileEntry), - proceed: make(chan bool), - } - go r.walk() - return r -} - -func readTombstones(path string) (map[string]([]string), error) { - tf, err := os.Open(filepath.Join(path, "tombstones.txt")) - if err != nil { - return nil, err - } - defer tf.Close() - s := bufio.NewScanner(tf) - if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("invalid tombstones file") - } - - ts := make(map[string]([]string)) - for s.Scan() { - t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` - dir := filepath.Dir(t) - ts[dir] = append(ts[dir], t) - } - if err = s.Err(); err != nil { - return nil, err - } - - return ts, nil -} - -func (r *legacyLayerReader) walkUntilCancelled() error { - root, err := longpath.LongAbs(r.root) - if err != nil { - return err - } - - r.root = root - ts, err := readTombstones(r.root) - if err != nil { - return err - } - - err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. 
- // Handle failure from what may be a golang bug in the conversion of - // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat - // which is called by filepath.Walk will fail when a filename contains - // unicode characters. Skip the recycle bin regardless which is goodness. - if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { - return filepath.SkipDir - } - - if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { - return nil - } - - r.result <- &fileEntry{path, info, nil} - if !<-r.proceed { - return errorIterationCanceled - } - - // List all the tombstones. - if info.IsDir() { - relPath, err := filepath.Rel(r.root, path) - if err != nil { - return err - } - if dts, ok := ts[relPath]; ok { - for _, t := range dts { - r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} - if !<-r.proceed { - return errorIterationCanceled - } - } - } - } - return nil - }) - if err == errorIterationCanceled { - return nil - } - if err == nil { - return io.EOF - } - return err -} - -func (r *legacyLayerReader) walk() { - defer close(r.result) - if !<-r.proceed { - return - } - - err := r.walkUntilCancelled() - if err != nil { - for { - r.result <- &fileEntry{err: err} - if !<-r.proceed { - return - } - } - } -} - -func (r *legacyLayerReader) reset() { - if r.backupReader != nil { - r.backupReader.Close() - r.backupReader = nil - } - if r.currentFile != nil { - r.currentFile.Close() - r.currentFile = nil - } -} - -func findBackupStreamSize(r io.Reader) (int64, error) { - br := winio.NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err != nil { - if err == io.EOF { - err = nil - } - return 0, err - } - if hdr.Id == winio.BackupData { - return hdr.Size, nil - } - } -} - -func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { - r.reset() - r.proceed <- true - fe := <-r.result - if fe == nil { - err = errors.New("LegacyLayerReader closed") - return - } - if fe.err != nil { - err = fe.err - return - } - - path, err = filepath.Rel(r.root, fe.path) - if err != nil { - return - } - - if fe.fi == nil { - // This is a tombstone. Return a nil fileInfo. - return - } - - if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { - fe.path += ".$wcidirs$" - } - - f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) - if err != nil { - return - } - defer func() { - if f != nil { - f.Close() - } - }() - - fileInfo, err = winio.GetFileBasicInfo(f) - if err != nil { - return - } - - if !hasPathPrefix(path, filesPath) { - size = fe.fi.Size() - r.backupReader = winio.NewBackupFileReader(f, false) - if path == hivesPath || path == filesPath { - // The Hives directory has a non-deterministic file time because of the - // nature of the import process. Use the times from System_Delta. - var g *os.File - g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) - if err != nil { - return - } - attr := fileInfo.FileAttributes - fileInfo, err = winio.GetFileBasicInfo(g) - g.Close() - if err != nil { - return - } - fileInfo.FileAttributes = attr - } - - // The creation time and access time get reset for files outside of the Files path. - fileInfo.CreationTime = fileInfo.LastWriteTime - fileInfo.LastAccessTime = fileInfo.LastWriteTime - } else { - // The file attributes are written before the backup stream. 
- var attr uint32 - err = binary.Read(f, binary.LittleEndian, &attr) - if err != nil { - return - } - fileInfo.FileAttributes = attr - beginning := int64(4) - - // Find the accurate file size. - if !fe.fi.IsDir() { - size, err = findBackupStreamSize(f) - if err != nil { - err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} - return - } - } - - // Return back to the beginning of the backup stream. - _, err = f.Seek(beginning, 0) - if err != nil { - return - } - } - - r.currentFile = f - f = nil - return -} - -func (r *legacyLayerReader) Read(b []byte) (int, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, io.EOF - } - return r.currentFile.Read(b) - } - return r.backupReader.Read(b) -} - -func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, errors.New("no current file") - } - return r.currentFile.Seek(offset, whence) - } - return 0, errors.New("seek not supported on this stream") -} - -func (r *legacyLayerReader) Close() error { - r.proceed <- false - <-r.result - r.reset() - return nil -} - -type pendingLink struct { - Path, Target string - TargetRoot *os.File -} - -type pendingDir struct { - Path string - Root *os.File -} - -type legacyLayerWriter struct { - root *os.File - destRoot *os.File - parentRoots []*os.File - currentFile *os.File - bufWriter *bufio.Writer - currentFileName string - currentFileRoot *os.File - backupWriter *winio.BackupFileWriter - Tombstones []string - HasUtilityVM bool - changedDi []dirInfo - addedFiles map[string]bool - PendingLinks []pendingLink - pendingDirs []pendingDir - currentIsDir bool -} - -// newLegacyLayerWriter returns a LayerWriter that can write the container layer -// transport format to disk. -func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { - w = &legacyLayerWriter{ - addedFiles: make(map[string]bool), - } - defer func() { - if err != nil { - w.CloseRoots() - w = nil - } - }() - w.root, err = safefile.OpenRoot(root) - if err != nil { - return - } - w.destRoot, err = safefile.OpenRoot(destRoot) - if err != nil { - return - } - for _, r := range parentRoots { - f, err := safefile.OpenRoot(r) - if err != nil { - return w, err - } - w.parentRoots = append(w.parentRoots, f) - } - w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536) - return -} - -func (w *legacyLayerWriter) CloseRoots() { - if w.root != nil { - w.root.Close() - w.root = nil - } - if w.destRoot != nil { - w.destRoot.Close() - w.destRoot = nil - } - for i := range w.parentRoots { - _ = w.parentRoots[i].Close() - } - w.parentRoots = nil -} - -func (w *legacyLayerWriter) initUtilityVM() error { - if !w.HasUtilityVM { - err := safefile.MkdirRelative(utilityVMPath, w.destRoot) - if err != nil { - return err - } - // Server 2016 does not support multiple layers for the utility VM, so - // clone the utility VM from the parent layer into this layer. Use hard - // links to avoid unnecessary copying, since most of the files are - // immutable. 
- err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) - if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) - } - w.HasUtilityVM = true - } - return nil -} - -func (w *legacyLayerWriter) reset() error { - err := w.bufWriter.Flush() - if err != nil { - return err - } - w.bufWriter.Reset(ioutil.Discard) - if w.currentIsDir { - r := w.currentFile - br := winio.NewBackupStreamReader(r) - // Seek to the beginning of the backup stream, skipping the fileattrs - if _, err := r.Seek(4, io.SeekStart); err != nil { - return err - } - - for { - bhdr, err := br.Next() - if err == io.EOF { - // end of backupstream data - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupReparseData: - // The current file is a `.$wcidirs$` metadata file that - // describes a directory reparse point. Delete the placeholder - // directory to prevent future files being added into the - // destination of the reparse point during the ImportLayer call - if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { - return err - } - w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) - default: - // ignore all other stream types, as we only care about directory reparse points - } - } - w.currentIsDir = false - } - if w.backupWriter != nil { - w.backupWriter.Close() - w.backupWriter = nil - } - if w.currentFile != nil { - w.currentFile.Close() - w.currentFile = nil - w.currentFileName = "" - w.currentFileRoot = nil - } - return nil -} - -// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata -func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { - src, err := safefile.OpenRelative( - subPath, - srcRoot, - syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, err - } - defer src.Close() - srcr := winio.NewBackupFileReader(src, true) - defer srcr.Close() - - fileInfo, err = winio.GetFileBasicInfo(src) - if err != nil { - return nil, err - } - - extraFlags := uint32(0) - if isDir { - extraFlags |= winapi.FILE_DIRECTORY_FILE - } - dest, err := safefile.OpenRelative( - subPath, - destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_CREATE, - extraFlags) - if err != nil { - return nil, err - } - defer dest.Close() - - err = winio.SetFileBasicInfo(dest, fileInfo) - if err != nil { - return nil, err - } - - destw := winio.NewBackupFileWriter(dest, true) - defer func() { - cerr := destw.Close() - if err == nil { - err = cerr - } - }() - - _, err = io.Copy(destw, srcr) - if err != nil { - return nil, err - } - - return fileInfo, nil -} - -// cloneTree clones a directory tree using hard links. It skips hard links for -// the file names in the provided map and just copies those files. 
-func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { - var di []dirInfo - err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) - if err != nil { - return err - } - err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) - if err != nil { - return err - } - - fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes - // Directories, reparse points, and files that will be mutated during - // utility VM import must be copied. All other files can be hard linked. - isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 - // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. - // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc - // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly - isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 - - if isDir || isReparsePoint || mutatedFiles[relPath] { - fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) - if err != nil { - return err - } - if isDir { - di = append(di, dirInfo{path: relPath, fileInfo: *fi}) - } - } else { - err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return err - } - - return reapplyDirectoryTimes(destRoot, di) -} - -func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - if err := w.reset(); err != nil { - return err - } - - if name == utilityVMPath { - return w.initUtilityVM() - } - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) - } - - name = filepath.Clean(name) - if hasPathPrefix(name, utilityVMPath) { - if !w.HasUtilityVM { - return errors.New("missing UtilityVM directory") - } - if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { - return errors.New("invalid UtilityVM layer") - } - createDisposition := uint32(winapi.FILE_OPEN) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - st, err := safefile.LstatRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - if st != nil { - // Delete the existing file/directory if it is not the same type as this directory. - existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { - return err - } - st = nil - } - } - if st == nil { - if err = safefile.MkdirRelative(name, w.destRoot); err != nil { - return err - } - } - } else { - // Overwrite any existing hard link. 
- err := safefile.RemoveRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - createDisposition = winapi.FILE_CREATE - } - - f, err := safefile.OpenRelative( - name, - w.destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - createDisposition, - winapi.FILE_OPEN_REPARSE_POINT, - ) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(name, w.destRoot) - } - }() - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return err - } - - w.backupWriter = winio.NewBackupFileWriter(f, true) - w.bufWriter.Reset(w.backupWriter) - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.destRoot - w.addedFiles[name] = true - f = nil - return nil - } - - fname := name - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - err := safefile.MkdirRelative(name, w.root) - if err != nil { - return err - } - fname += ".$wcidirs$" - w.currentIsDir = true - } - - f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(fname, w.root) - } - }() - - strippedFi := *fileInfo - strippedFi.FileAttributes = 0 - err = winio.SetFileBasicInfo(f, &strippedFi) - if err != nil { - return err - } - - if hasPathPrefix(name, hivesPath) { - w.backupWriter = winio.NewBackupFileWriter(f, false) - w.bufWriter.Reset(w.backupWriter) - } else { - w.bufWriter.Reset(f) - // The file attributes are written before the stream. - err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) - if err != nil { - w.bufWriter.Reset(ioutil.Discard) - return err - } - } - - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.root - w.addedFiles[name] = true - f = nil - return nil -} - -func (w *legacyLayerWriter) AddLink(name string, target string) error { - if err := w.reset(); err != nil { - return err - } - - target = filepath.Clean(target) - var roots []*os.File - if hasPathPrefix(target, filesPath) { - // Look for cross-layer hard link targets in the parent layers, since - // nothing is in the destination path yet. - roots = w.parentRoots - } else if hasPathPrefix(target, utilityVMFilesPath) { - // Since the utility VM is fully cloned into the destination path - // already, look for cross-layer hard link targets directly in the - // destination path. - roots = []*os.File{w.destRoot} - } - - if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { - return errors.New("invalid hard link in layer") - } - - // Try to find the target of the link in a previously added file. If that - // fails, search in parent layers. - var selectedRoot *os.File - if _, ok := w.addedFiles[target]; ok { - selectedRoot = w.destRoot - } else { - for _, r := range roots { - if _, err := safefile.LstatRelative(target, r); err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - selectedRoot = r - break - } - } - if selectedRoot == nil { - return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) - } - } - - // The link can't be written until after the ImportLayer call. 
- w.PendingLinks = append(w.PendingLinks, pendingLink{ - Path: name, - Target: target, - TargetRoot: selectedRoot, - }) - w.addedFiles[name] = true - return nil -} - -func (w *legacyLayerWriter) Remove(name string) error { - name = filepath.Clean(name) - if hasPathPrefix(name, filesPath) { - w.Tombstones = append(w.Tombstones, name) - } else if hasPathPrefix(name, utilityVMFilesPath) { - err := w.initUtilityVM() - if err != nil { - return err - } - // Make sure the path exists; os.RemoveAll will not fail if the file is - // already gone, and this needs to be a fatal error for diagnostics - // purposes. - if _, err := safefile.LstatRelative(name, w.destRoot); err != nil { - return err - } - err = safefile.RemoveAllRelative(name, w.destRoot) - if err != nil { - return err - } - } else { - return fmt.Errorf("invalid tombstone %s", name) - } - - return nil -} - -func (w *legacyLayerWriter) Write(b []byte) (int, error) { - if w.backupWriter == nil && w.currentFile == nil { - return 0, errors.New("closed") - } - return w.bufWriter.Write(b) -} - -func (w *legacyLayerWriter) Close() error { - if err := w.reset(); err != nil { - return err - } - if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { - return err - } - for _, pd := range w.pendingDirs { - err := safefile.MkdirRelative(pd.Path, pd.Root) - if err != nil { - return err - } - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go deleted file mode 100644 index c45fa2750c..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// NameToGuid converts the given string into a GUID using the algorithm in the -// Host Compute Service, ensuring GUIDs generated with the same string are common -// across all clients. -func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { - title := "hcsshim::NameToGuid" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("objectName", name)) - - var id guid.GUID - err = nameToGuid(name, &id) - if err != nil { - return guid.GUID{}, hcserror.New(err, title, "") - } - span.AddAttributes(trace.StringAttribute("guid", id.String())) - return id, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go deleted file mode 100644 index b66e071245..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "strings" - "sync" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -var prepareLayerLock sync.Mutex - -// PrepareLayer finds a mounted read-write layer matching path and enables the -// the filesystem filter for use on that layer. 
This requires the paths to all -// parent layers, and is necessary in order to view or interact with the layer -// as an actual filesystem (reading and writing files, creating directories, etc). -// Disabling the filter must be done via UnprepareLayer. -func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::PrepareLayer" - ctx, span := oc.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - // This lock is a temporary workaround for a Windows bug. Only allowing one - // call to prepareLayer at a time vastly reduces the chance of a timeout. - prepareLayerLock.Lock() - defer prepareLayerLock.Unlock() - err = prepareLayer(&stdDriverInfo, path, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go deleted file mode 100644 index 7c49cbda45..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - "os" - - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ProcessBaseLayer post-processes a base layer that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessBaseLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessBaseLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processBaseImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} - -// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessUtilityVMImage" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processUtilityImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go deleted file mode 100644 index fe20702c18..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows - -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// UnprepareLayer disables the filesystem filter for the read-write layer with -// the given id. 
-func UnprepareLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::UnprepareLayer" - ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = unprepareLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go deleted file mode 100644 index 8aeab8d24e..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build windows - -package wclayer - -import "github.com/Microsoft/go-winio/pkg/guid" - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go - -//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? -//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? -//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? -//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? -//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? -//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? -//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? -//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? -//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? -//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? -//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? -//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? -//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? -//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? -//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? -//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? -//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? - -//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? 
- -//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk - -//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW - -type _guid = guid.GUID diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/doc.go deleted file mode 100644 index b02b2ddcf2..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/doc.go +++ /dev/null @@ -1 +0,0 @@ -package wcow diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go b/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go deleted file mode 100644 index 992ac0edda..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows - -package wcow - -import ( - "context" - "os" - "path/filepath" - - "github.com/Microsoft/hcsshim/internal/copyfile" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -// CreateUVMScratch is a helper to create a scratch for a Windows utility VM -// with permissions to the specified VM ID in a specified directory -func CreateUVMScratch(ctx context.Context, imagePath, destDirectory, vmID string) error { - sourceScratch := filepath.Join(imagePath, `UtilityVM\SystemTemplate.vhdx`) - targetScratch := filepath.Join(destDirectory, "sandbox.vhdx") - if err := copyfile.CopyFile(ctx, sourceScratch, targetScratch, true); err != nil { - return err - } - if err := wclayer.GrantVmAccess(ctx, vmID, targetScratch); err != nil { - os.Remove(targetScratch) - return err - } - return nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go deleted file mode 100644 index ab434a75b5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go +++ /dev/null @@ -1,20 +0,0 @@ -package winapi - -const ( - BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001 - BINDFLT_FLAG_MERGED_BIND_MAPPING uint32 = 0x00000002 - BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING uint32 = 0x00000004 -) - -// HRESULT -// BfSetupFilterEx( -// _In_ ULONG Flags, -// _In_opt_ HANDLE JobHandle, -// _In_opt_ PSID Sid, -// _In_ LPCWSTR VirtualizationRootPath, -// _In_ LPCWSTR VirtualizationTargetPath, -// _In_reads_opt_( VirtualizationExceptionPathCount ) LPCWSTR* VirtualizationExceptionPaths, -// _In_opt_ ULONG VirtualizationExceptionPathCount -// ); -// -//sys BfSetupFilterEx(flags uint32, jobHandle windows.Handle, sid *windows.SID, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilterEx? 
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go deleted file mode 100644 index 4547cdd8e8..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build windows - -package winapi - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 - -// CreatePseudoConsole creates a windows pseudo console. -func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { - // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. - return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) -} - -// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. -func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { - // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. - return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) -} - -// HRESULT WINAPI CreatePseudoConsole( -// _In_ COORD size, -// _In_ HANDLE hInput, -// _In_ HANDLE hOutput, -// _In_ DWORD dwFlags, -// _Out_ HPCON* phPC -// ); -// -//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole - -// void WINAPI ClosePseudoConsole( -// _In_ HPCON hPC -// ); -// -//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole - -// HRESULT WINAPI ResizePseudoConsole( -// _In_ HPCON hPC , -// _In_ COORD size -// ); -// -//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go deleted file mode 100644 index 7875466cad..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build windows - -package winapi - -import "github.com/Microsoft/go-winio/pkg/guid" - -//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA -//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA -//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW -//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW - -type DevPropKey struct { - Fmtid guid.GUID - Pid uint32 -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go deleted file mode 100644 index 9acc0bfc17..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package winapi contains various low-level bindings to Windows APIs. It can -// be thought of as an extension to golang.org/x/sys/windows. 
-package winapi diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go deleted file mode 100644 index 40cbf8712f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build windows - -package winapi - -import ( - "golang.org/x/sys/windows" -) - -func IsElevated() bool { - return windows.GetCurrentProcessToken().IsElevated() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go deleted file mode 100644 index 49ce924cbe..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build windows - -package winapi - -import "syscall" - -//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError - -const ( - STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B - ERROR_NO_MORE_ITEMS = 0x103 - ERROR_MORE_DATA syscall.Errno = 234 -) - -func NTSuccess(status uint32) bool { - return status == 0 -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go deleted file mode 100644 index 0d78c051ba..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go +++ /dev/null @@ -1,112 +0,0 @@ -//go:build windows - -package winapi - -//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile -//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile - -//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject -//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject - -const ( - FileLinkInformationClass = 11 - FileDispositionInformationExClass = 64 - - FILE_READ_ATTRIBUTES = 0x0080 - FILE_WRITE_ATTRIBUTES = 0x0100 - DELETE = 0x10000 - - FILE_OPEN = 1 - FILE_CREATE = 2 - - FILE_LIST_DIRECTORY = 0x00000001 - FILE_DIRECTORY_FILE = 0x00000001 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - - FILE_DISPOSITION_DELETE = 0x00000001 - - OBJ_DONT_REPARSE = 0x1000 - - STATUS_MORE_ENTRIES = 0x105 - STATUS_NO_MORE_ENTRIES = 0x8000001a -) - -// Select entries from FILE_INFO_BY_HANDLE_CLASS. 
-// -// C declaration: -// typedef enum _FILE_INFO_BY_HANDLE_CLASS { -// FileBasicInfo, -// FileStandardInfo, -// FileNameInfo, -// FileRenameInfo, -// FileDispositionInfo, -// FileAllocationInfo, -// FileEndOfFileInfo, -// FileStreamInfo, -// FileCompressionInfo, -// FileAttributeTagInfo, -// FileIdBothDirectoryInfo, -// FileIdBothDirectoryRestartInfo, -// FileIoPriorityHintInfo, -// FileRemoteProtocolInfo, -// FileFullDirectoryInfo, -// FileFullDirectoryRestartInfo, -// FileStorageInfo, -// FileAlignmentInfo, -// FileIdInfo, -// FileIdExtdDirectoryInfo, -// FileIdExtdDirectoryRestartInfo, -// FileDispositionInfoEx, -// FileRenameInfoEx, -// FileCaseSensitiveInfo, -// FileNormalizedNameInfo, -// MaximumFileInfoByHandleClass -// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class -const ( - FileIdInfo = 18 -) - -type FileDispositionInformationEx struct { - Flags uintptr -} - -type IOStatusBlock struct { - Status, Information uintptr -} - -type ObjectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *UnicodeString - Attributes uintptr - SecurityDescriptor uintptr - SecurityQoS uintptr -} - -type ObjectDirectoryInformation struct { - Name UnicodeString - TypeName UnicodeString -} - -type FileLinkInformation struct { - ReplaceIfExists bool - RootDirectory uintptr - FileNameLength uint32 - FileName [1]uint16 -} - -// C declaration: -// typedef struct _FILE_ID_INFO { -// ULONGLONG VolumeSerialNumber; -// FILE_ID_128 FileId; -// } FILE_ID_INFO, *PFILE_ID_INFO; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info -type FILE_ID_INFO struct { - VolumeSerialNumber uint64 - FileID [16]byte -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go deleted file mode 100644 index 4ada2f53cc..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go +++ /dev/null @@ -1,224 +0,0 @@ -//go:build windows - -package winapi - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -// Messages that can be received from an assigned io completion port. -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -const ( - JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 - JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 - JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 - JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 - JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 - JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 - JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 - JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 - JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 - JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 -) - -// Access rights for creating or opening job objects. 
-// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights -const ( - JOB_OBJECT_QUERY = 0x0004 - JOB_OBJECT_ALL_ACCESS = 0x1F001F -) - -// IO limit flags -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 - -const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -const ( - JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota - JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED - JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP - JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY - JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE -) - -// JobObjectInformationClass values. Used for a call to QueryInformationJobObject -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject -const ( - JobObjectBasicAccountingInformation uint32 = 1 - JobObjectBasicProcessIdList uint32 = 3 - JobObjectBasicAndIoAccountingInformation uint32 = 8 - JobObjectLimitViolationInformation uint32 = 13 - JobObjectMemoryUsageInformation uint32 = 28 - JobObjectNotificationLimitInformation2 uint32 = 33 - JobObjectCreateSilo uint32 = 35 - JobObjectIoAttribution uint32 = 42 -) - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { - ControlFlags uint32 - Value uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { - MaxIops int64 - MaxBandwidth int64 - ReservationIops int64 - BaseIOSize uint32 - VolumeName string - ControlFlags uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list -type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { - NumberOfAssignedProcesses uint32 - NumberOfProcessIdsInList uint32 - ProcessIdList [1]uintptr -} - -// AllPids returns all the process Ids in the job object. 
-func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { - return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information -type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { - TotalUserTime int64 - TotalKernelTime int64 - ThisPeriodTotalUserTime int64 - ThisPeriodTotalKernelTime int64 - TotalPageFaultCount uint32 - TotalProcesses uint32 - ActiveProcesses uint32 - TotalTerminateProcesses uint32 -} - -//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information -type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { - BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION - IoInfo windows.IO_COUNTERS -} - -// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { -// ULONG64 JobMemory; -// ULONG64 PeakJobMemoryUsed; -// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; -// -type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { - JobMemory uint64 - PeakJobMemoryUsed uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { -// ULONG_PTR IoCount; -// ULONGLONG TotalNonOverlappedQueueTime; -// ULONGLONG TotalNonOverlappedServiceTime; -// ULONGLONG TotalSize; -// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; -// -type JOBOBJECT_IO_ATTRIBUTION_STATS struct { - IoCount uintptr - TotalNonOverlappedQueueTime uint64 - TotalNonOverlappedServiceTime uint64 - TotalSize uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { -// ULONG ControlFlags; -// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; -// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; -// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; -// -type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { - ControlFlags uint32 - ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS - WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { - CompletionKey windows.Handle - CompletionPort windows.Handle -} - -// BOOL IsProcessInJob( -// HANDLE ProcessHandle, -// HANDLE JobHandle, -// PBOOL Result -// ); -// -//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob - -// BOOL QueryInformationJobObject( -// HANDLE hJob, -// JOBOBJECTINFOCLASS JobObjectInformationClass, -// LPVOID lpJobObjectInformation, -// DWORD cbJobObjectInformationLength, -// LPDWORD lpReturnLength -// ); -// -//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject - -// HANDLE OpenJobObjectW( -// DWORD dwDesiredAccess, -// BOOL bInheritHandle, -// LPCWSTR lpName -// ); -// -//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW - -// DWORD SetIoRateControlInformationJobObject( -// HANDLE hJob, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo -// ); -// -//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject - -// DWORD QueryIoRateControlInformationJobObject( -// HANDLE hJob, -// PCWSTR 
VolumeName, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, -// ULONG *InfoBlockCount -// ); -// -//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject - -// NTSTATUS -// NtOpenJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -// -//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject - -// NTSTATUS -// NTAPI -// NtCreateJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -// -//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go deleted file mode 100644 index c6a149b552..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go +++ /dev/null @@ -1,12 +0,0 @@ -package winapi - -// DWORD SearchPathW( -// LPCWSTR lpPath, -// LPCWSTR lpFileName, -// LPCWSTR lpExtension, -// DWORD nBufferLength, -// LPWSTR lpBuffer, -// LPWSTR *lpFilePart -// ); -// -//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go deleted file mode 100644 index 5f9e03fd28..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ /dev/null @@ -1,65 +0,0 @@ -package winapi - -const PROCESS_ALL_ACCESS uint32 = 2097151 - -const ( - PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 - PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D -) - -// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. 
-const ProcessVmCounters = 3 - -// __kernel_entry NTSTATUS NtQueryInformationProcess( -// [in] HANDLE ProcessHandle, -// [in] PROCESSINFOCLASS ProcessInformationClass, -// [out] PVOID ProcessInformation, -// [in] ULONG ProcessInformationLength, -// [out, optional] PULONG ReturnLength -// ); -// -//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess - -// typedef struct _VM_COUNTERS_EX -// { -// SIZE_T PeakVirtualSize; -// SIZE_T VirtualSize; -// ULONG PageFaultCount; -// SIZE_T PeakWorkingSetSize; -// SIZE_T WorkingSetSize; -// SIZE_T QuotaPeakPagedPoolUsage; -// SIZE_T QuotaPagedPoolUsage; -// SIZE_T QuotaPeakNonPagedPoolUsage; -// SIZE_T QuotaNonPagedPoolUsage; -// SIZE_T PagefileUsage; -// SIZE_T PeakPagefileUsage; -// SIZE_T PrivateUsage; -// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; -// -type VM_COUNTERS_EX struct { - PeakVirtualSize uintptr - VirtualSize uintptr - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -// typedef struct _VM_COUNTERS_EX2 -// { -// VM_COUNTERS_EX CountersEx; -// SIZE_T PrivateWorkingSetSize; -// SIZE_T SharedCommitUsage; -// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; -// -type VM_COUNTERS_EX2 struct { - CountersEx VM_COUNTERS_EX - PrivateWorkingSetSize uintptr - SharedCommitUsage uintptr -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go deleted file mode 100644 index f5b1868e48..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build windows - -package winapi - -import "golang.org/x/sys/windows" - -const SystemProcessInformation = 5 - -const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 - -// __kernel_entry NTSTATUS NtQuerySystemInformation( -// SYSTEM_INFORMATION_CLASS SystemInformationClass, -// PVOID SystemInformation, -// ULONG SystemInformationLength, -// PULONG ReturnLength -// ); -// -//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation - -type SYSTEM_PROCESS_INFORMATION struct { - NextEntryOffset uint32 // ULONG - NumberOfThreads uint32 // ULONG - WorkingSetPrivateSize int64 // LARGE_INTEGER - HardFaultCount uint32 // ULONG - NumberOfThreadsHighWatermark uint32 // ULONG - CycleTime uint64 // ULONGLONG - CreateTime int64 // LARGE_INTEGER - UserTime int64 // LARGE_INTEGER - KernelTime int64 // LARGE_INTEGER - ImageName UnicodeString // UNICODE_STRING - BasePriority int32 // KPRIORITY - UniqueProcessID windows.Handle // HANDLE - InheritedFromUniqueProcessID windows.Handle // HANDLE - HandleCount uint32 // ULONG - SessionID uint32 // ULONG - UniqueProcessKey *uint32 // ULONG_PTR - PeakVirtualSize uintptr // SIZE_T - VirtualSize uintptr // SIZE_T - PageFaultCount uint32 // ULONG - PeakWorkingSetSize uintptr // SIZE_T - WorkingSetSize uintptr // SIZE_T - QuotaPeakPagedPoolUsage uintptr // SIZE_T - QuotaPagedPoolUsage uintptr // SIZE_T - QuotaPeakNonPagedPoolUsage uintptr // SIZE_T - QuotaNonPagedPoolUsage uintptr // SIZE_T - PagefileUsage uintptr // SIZE_T - PeakPagefileUsage uintptr // SIZE_T - 
PrivatePageCount uintptr // SIZE_T - ReadOperationCount int64 // LARGE_INTEGER - WriteOperationCount int64 // LARGE_INTEGER - OtherOperationCount int64 // LARGE_INTEGER - ReadTransferCount int64 // LARGE_INTEGER - WriteTransferCount int64 // LARGE_INTEGER - OtherTransferCount int64 // LARGE_INTEGER -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go deleted file mode 100644 index f23141a836..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go +++ /dev/null @@ -1,13 +0,0 @@ -package winapi - -// HANDLE CreateRemoteThread( -// HANDLE hProcess, -// LPSECURITY_ATTRIBUTES lpThreadAttributes, -// SIZE_T dwStackSize, -// LPTHREAD_START_ROUTINE lpStartAddress, -// LPVOID lpParameter, -// DWORD dwCreationFlags, -// LPDWORD lpThreadId -// ); -// -//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go deleted file mode 100644 index 8abc095d60..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go +++ /dev/null @@ -1,194 +0,0 @@ -//go:build windows - -package winapi - -import ( - "syscall" - - "golang.org/x/sys/windows" -) - -const UserNameCharLimit = 20 - -const ( - USER_PRIV_GUEST uint32 = iota - USER_PRIV_USER - USER_PRIV_ADMIN -) - -const ( - UF_NORMAL_ACCOUNT = 0x00200 - UF_DONT_EXPIRE_PASSWD = 0x10000 -) - -const NERR_UserNotFound = syscall.Errno(0x8AD) - -// typedef struct _LOCALGROUP_MEMBERS_INFO_0 { -// PSID lgrmi0_sid; -// } LOCALGROUP_MEMBERS_INFO_0, *PLOCALGROUP_MEMBERS_INFO_0, *LPLOCALGROUP_MEMBERS_INFO_0; -type LocalGroupMembersInfo0 struct { - Sid *windows.SID -} - -// typedef struct _LOCALGROUP_INFO_1 { -// LPWSTR lgrpi1_name; -// LPWSTR lgrpi1_comment; -// } LOCALGROUP_INFO_1, *PLOCALGROUP_INFO_1, *LPLOCALGROUP_INFO_1; -type LocalGroupInfo1 struct { - Name *uint16 - Comment *uint16 -} - -// typedef struct _USER_INFO_1 { -// LPWSTR usri1_name; -// LPWSTR usri1_password; -// DWORD usri1_password_age; -// DWORD usri1_priv; -// LPWSTR usri1_home_dir; -// LPWSTR usri1_comment; -// DWORD usri1_flags; -// LPWSTR usri1_script_path; -// } USER_INFO_1, *PUSER_INFO_1, *LPUSER_INFO_1; -type UserInfo1 struct { - Name *uint16 - Password *uint16 - PasswordAge uint32 - Priv uint32 - HomeDir *uint16 - Comment *uint16 - Flags uint32 - ScriptPath *uint16 -} - -// NET_API_STATUS NET_API_FUNCTION NetLocalGroupGetInfo( -// [in] LPCWSTR servername, -// [in] LPCWSTR groupname, -// [in] DWORD level, -// [out] LPBYTE *bufptr -// ); -// -//sys netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) = netapi32.NetLocalGroupGetInfo - -// NetLocalGroupGetInfo is a slightly go friendlier wrapper around the NetLocalGroupGetInfo function. Instead of taking in *uint16's, it takes in -// go strings and does the conversion internally. 
-func NetLocalGroupGetInfo(serverName, groupName string, level uint32, bufPtr **byte) (err error) { - var ( - serverNameUTF16 *uint16 - groupNameUTF16 *uint16 - ) - if serverName != "" { - serverNameUTF16, err = windows.UTF16PtrFromString(serverName) - if err != nil { - return err - } - } - if groupName != "" { - groupNameUTF16, err = windows.UTF16PtrFromString(groupName) - if err != nil { - return err - } - } - return netLocalGroupGetInfo( - serverNameUTF16, - groupNameUTF16, - level, - bufPtr, - ) -} - -// NET_API_STATUS NET_API_FUNCTION NetUserAdd( -// [in] LPCWSTR servername, -// [in] DWORD level, -// [in] LPBYTE buf, -// [out] LPDWORD parm_err -// ); -// -//sys netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) = netapi32.NetUserAdd - -// NetUserAdd is a slightly go friendlier wrapper around the NetUserAdd function. Instead of taking in *uint16's, it takes in -// go strings and does the conversion internally. -func NetUserAdd(serverName string, level uint32, buf *byte, parm_err *uint32) (err error) { - var serverNameUTF16 *uint16 - if serverName != "" { - serverNameUTF16, err = windows.UTF16PtrFromString(serverName) - if err != nil { - return err - } - } - return netUserAdd( - serverNameUTF16, - level, - buf, - parm_err, - ) -} - -// NET_API_STATUS NET_API_FUNCTION NetUserDel( -// [in] LPCWSTR servername, -// [in] LPCWSTR username -// ); -// -//sys netUserDel(serverName *uint16, username *uint16) (status error) = netapi32.NetUserDel - -// NetUserDel is a slightly go friendlier wrapper around the NetUserDel function. Instead of taking in *uint16's, it takes in -// go strings and does the conversion internally. -func NetUserDel(serverName, userName string) (err error) { - var ( - serverNameUTF16 *uint16 - userNameUTF16 *uint16 - ) - if serverName != "" { - serverNameUTF16, err = windows.UTF16PtrFromString(serverName) - if err != nil { - return err - } - } - if userName != "" { - userNameUTF16, err = windows.UTF16PtrFromString(userName) - if err != nil { - return err - } - } - return netUserDel( - serverNameUTF16, - userNameUTF16, - ) -} - -// NET_API_STATUS NET_API_FUNCTION NetLocalGroupAddMembers( -// [in] LPCWSTR servername, -// [in] LPCWSTR groupname, -// [in] DWORD level, -// [in] LPBYTE buf, -// [in] DWORD totalentries -// ); -// -//sys netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) = netapi32.NetLocalGroupAddMembers - -// NetLocalGroupAddMembers is a slightly go friendlier wrapper around the NetLocalGroupAddMembers function. Instead of taking in *uint16's, it takes in -// go strings and does the conversion internally. 
-func NetLocalGroupAddMembers(serverName, groupName string, level uint32, buf *byte, totalEntries uint32) (err error) { - var ( - serverNameUTF16 *uint16 - groupNameUTF16 *uint16 - ) - if serverName != "" { - serverNameUTF16, err = windows.UTF16PtrFromString(serverName) - if err != nil { - return err - } - } - if groupName != "" { - groupNameUTF16, err = windows.UTF16PtrFromString(groupName) - if err != nil { - return err - } - } - return netLocalGroupAddMembers( - serverNameUTF16, - groupNameUTF16, - level, - buf, - totalEntries, - ) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go deleted file mode 100644 index 7b93974846..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build windows - -package winapi - -import ( - "errors" - "reflect" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice -// for easier interop with Go APIs -func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - hdr.Data = uintptr(unsafe.Pointer(buffer)) - hdr.Cap = bufferLength - hdr.Len = bufferLength - - return -} - -// UnicodeString corresponds to UNICODE_STRING win32 struct defined here -// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string -type UnicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer *uint16 -} - -// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value -// denotes the maximum number of wide chars a path can have. -const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 - -//String converts a UnicodeString to a golang string -func (uni UnicodeString) String() string { - // UnicodeString is not guaranteed to be null terminated, therefore - // use the UnicodeString's Length field - return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) -} - -// NewUnicodeString allocates a new UnicodeString and copies `s` into -// the buffer of the new UnicodeString. -func NewUnicodeString(s string) (*UnicodeString, error) { - buf, err := windows.UTF16FromString(s) - if err != nil { - return nil, err - } - - if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { - return nil, syscall.ENAMETOOLONG - } - - uni := &UnicodeString{ - // The length is in bytes and should not include the trailing null character. - Length: uint16((len(buf) - 1) * 2), - MaximumLength: uint16((len(buf) - 1) * 2), - Buffer: &buf[0], - } - return uni, nil -} - -// ConvertStringSetToSlice is a helper function used to convert the contents of -// `buf` into a string slice. `buf` contains a set of null terminated strings -// with an additional null at the end to indicate the end of the set. 
-func ConvertStringSetToSlice(buf []byte) ([]string, error) { - var results []string - prev := 0 - for i := range buf { - if buf[i] == 0 { - if prev == i { - // found two null characters in a row, return result - return results, nil - } - results = append(results, string(buf[prev:i])) - prev = i + 1 - } - } - return nil, errors.New("string set malformed: missing null terminator at end of buffer") -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go deleted file mode 100644 index b45fc7de43..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ /dev/null @@ -1,3 +0,0 @@ -package winapi - -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go bindflt.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go deleted file mode 100644 index ea4eb27e62..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ /dev/null @@ -1,407 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package winapi - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") - modnetapi32 = windows.NewLazySystemDLL("netapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") - - procBfSetupFilterEx = modbindfltapi.NewProc("BfSetupFilterEx") - procNetLocalGroupGetInfo = modnetapi32.NewProc("NetLocalGroupGetInfo") - procNetUserAdd = modnetapi32.NewProc("NetUserAdd") - procNetUserDel = modnetapi32.NewProc("NetUserDel") - procNetLocalGroupAddMembers = modnetapi32.NewProc("NetLocalGroupAddMembers") - procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") - procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") - procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") - procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") - procSearchPathW = modkernel32.NewProc("SearchPathW") - procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") - procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") - procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") - procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") - procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") - procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") - procLogonUserW = modadvapi32.NewProc("LogonUserW") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") - procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") - procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") - procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") - procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") - procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") -) - -func BfSetupFilterEx(flags uint32, jobHandle windows.Handle, sid *windows.SID, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { - if hr = procBfSetupFilterEx.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall9(procBfSetupFilterEx.Addr(), 7, uintptr(flags), uintptr(jobHandle), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) { - r0, _, _ := 
syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func netUserDel(serverName *uint16, username *uint16) (status error) { - r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func ClosePseudoConsole(hpc windows.Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) - return -} - -func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) - if r0 != 0 { - win32Err = syscall.Errno(r0) - } - return -} - -func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := 
syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { - r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), 
uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LocalAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) - ptr = uintptr(r0) - return -} - -func LocalFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) - return -} - -func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) - status = uint32(r0) - return -} - -func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - amount = uint32(r0) - return -} - -func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(pDeviceID) - if hr != nil { - return - } - return _CMLocateDevNode(pdnDevInst, _p0, uFlags) -} - -func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), 
uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) - status = uint32(r0) - return -} - -func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) - status = uint32(r0) - return -} - -func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) - status = uint32(r0) - return -} - -func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { - var _p0 uint32 - if singleEntry { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if restartScan { - _p1 = 1 - } else { - _p1 = 0 - } - r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func RtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/layer.go b/test/vendor/github.com/Microsoft/hcsshim/layer.go deleted file mode 100644 index e323c8308d..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/layer.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "context" - "crypto/sha1" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -func layerPath(info *DriverInfo, id string) string { - return filepath.Join(info.HomeDir, id) -} - -func ActivateLayer(info DriverInfo, id string) error { - return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) -} -func CreateLayer(info DriverInfo, id, parent string) error { - return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) -} - -// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. -func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func DeactivateLayer(info DriverInfo, id string) error { - return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) -} -func DestroyLayer(info DriverInfo, id string) error { - return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) -} - -// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
-func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) -} -func GetLayerMountPath(info DriverInfo, id string) (string, error) { - return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) -} -func GetSharedBaseImages() (imageData string, err error) { - return wclayer.GetSharedBaseImages(context.Background()) -} -func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) -} -func LayerExists(info DriverInfo, id string) (bool, error) { - return wclayer.LayerExists(context.Background(), layerPath(&info, id)) -} -func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func ProcessBaseLayer(path string) error { - return wclayer.ProcessBaseLayer(context.Background(), path) -} -func ProcessUtilityVMImage(path string) error { - return wclayer.ProcessUtilityVMImage(context.Background(), path) -} -func UnprepareLayer(info DriverInfo, layerId string) error { - return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) -} - -type DriverInfo struct { - Flavour int - HomeDir string -} - -type GUID [16]byte - -func NameToGuid(name string) (id GUID, err error) { - g, err := wclayer.NameToGuid(context.Background(), name) - return g.ToWindowsArray(), err -} - -func NewGUID(source string) *GUID { - h := sha1.Sum([]byte(source)) - var g GUID - copy(g[0:], h[0:16]) - return &g -} - -func (g *GUID) ToString() string { - return guid.FromWindowsArray(*g).String() -} - -type LayerReader = wclayer.LayerReader - -func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type LayerWriter = wclayer.LayerWriter - -func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/test/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/test/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go deleted file mode 100644 index ecb0766164..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build windows - -package osversion - -import ( - "fmt" - "sync" - - "golang.org/x/sys/windows" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -var ( - osv OSVersion - once sync.Once -) - -// Get gets the operating system version on Windows. 
-// The calling application must be manifested to get the correct version information. -func Get() OSVersion { - once.Do(func() { - var err error - osv = OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - }) - return osv -} - -// Build gets the build-number on Windows -// The calling application must be manifested to get the correct version information. -func Build() uint16 { - return Get().Build -} - -func (osv OSVersion) ToString() string { - return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/doc.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/doc.go deleted file mode 100644 index f2523af44a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/doc.go +++ /dev/null @@ -1 +0,0 @@ -package runhcs diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go deleted file mode 100644 index 1d82f72c6f..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go +++ /dev/null @@ -1,175 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "sync/atomic" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/containerd/go-runc" -) - -// Format is the type of log formatting options available. -type Format string - -const ( - none Format = "" - // Text is the default text log output. - Text Format = "text" - // JSON is the JSON formatted log output. - JSON Format = "json" -) - -var runhcsPath atomic.Value - -func getCommandPath() string { - const command = "runhcs.exe" - - pathi := runhcsPath.Load() - if pathi == nil { - path, err := exec.LookPath(command) - if err != nil { - // LookPath only finds current directory matches based on the - // callers current directory but the caller is not likely in the - // same directory as the containerd executables. Instead match the - // calling binaries path (a containerd shim usually) and see if they - // are side by side. If so execute the runhcs.exe found there. - if self, serr := os.Executable(); serr == nil { - testPath := filepath.Join(filepath.Dir(self), command) - if _, serr := os.Stat(testPath); serr == nil { - path = testPath - } - } - if path == "" { - // Failed to look up command just use it directly and let the - // Windows loader find it. - path = command - } - runhcsPath.Store(path) - return path - } - apath, err := filepath.Abs(path) - if err != nil { - // We couldnt make `path` an `AbsPath`. Just use `path` directly and - // let the Windows loader find it. - apath = path - } - runhcsPath.Store(apath) - return apath - } - return pathi.(string) -} - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -func getBuf() *bytes.Buffer { - return bytesBufferPool.Get().(*bytes.Buffer) -} - -func putBuf(b *bytes.Buffer) { - b.Reset() - bytesBufferPool.Put(b) -} - -// Runhcs is the client to the runhcs cli -type Runhcs struct { - // Debug enables debug output for logging. - Debug bool - // Log sets the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written. 
- Log string - // LogFormat sets the format used by logs. - LogFormat Format - // Owner sets the compute system owner property. - Owner string - // Root is the registry key root for storage of runhcs container state. - Root string -} - -func (r *Runhcs) args() []string { - var out []string - if r.Debug { - out = append(out, "--debug") - } - if r.Log != "" { - if strings.HasPrefix(r.Log, irunhcs.SafePipePrefix) { - out = append(out, "--log", r.Log) - } else { - abs, err := filepath.Abs(r.Log) - if err == nil { - out = append(out, "--log", abs) - } - } - } - if r.LogFormat != none { - out = append(out, "--log-format", string(r.LogFormat)) - } - if r.Owner != "" { - out = append(out, "--owner", r.Owner) - } - if r.Root != "" { - out = append(out, "--root", r.Root) - } - return out -} - -func (r *Runhcs) command(context context.Context, args ...string) *exec.Cmd { - cmd := exec.CommandContext(context, getCommandPath(), append(r.args(), args...)...) - cmd.Env = os.Environ() - return cmd -} - -// runOrError will run the provided command. If an error is -// encountered and neither Stdout or Stderr was set the error and the -// stderr of the command will be returned in the format of : -// -func (r *Runhcs) runOrError(cmd *exec.Cmd) error { - if cmd.Stdout != nil || cmd.Stderr != nil { - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err - } - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil -} - -func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) { - b := getBuf() - defer putBuf(b) - - cmd.Stdout = b - if combined { - cmd.Stderr = b - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return nil, err - } - - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - - return b.Bytes(), err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go deleted file mode 100644 index 956e4c1f77..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "errors" - "path/filepath" - "strconv" -) - -// CreateScratch creates a scratch vhdx at 'destpath' that is ext4 formatted. -func (r *Runhcs) CreateScratch(context context.Context, destpath string) error { - return r.CreateScratchWithOpts(context, destpath, nil) -} - -// CreateScratchOpts is the set of options that can be used with the -// `CreateScratchWithOpts` command. -type CreateScratchOpts struct { - // SizeGB is the size in GB of the scratch file to create. - SizeGB int - // CacheFile is the path to an existing `scratch.vhx` to copy. If - // `CacheFile` does not exit the scratch will be created. 
- CacheFile string -} - -func (opt *CreateScratchOpts) args() ([]string, error) { - var out []string - if opt.SizeGB < 0 { - return nil, errors.New("sizeGB must be >= 0") - } else if opt.SizeGB > 0 { - out = append(out, "--sizeGB", strconv.Itoa(opt.SizeGB)) - } - if opt.CacheFile != "" { - abs, err := filepath.Abs(opt.CacheFile) - if err != nil { - return nil, err - } - out = append(out, "--cache-path", abs) - } - return out, nil -} - -// CreateScratchWithOpts creates a scratch vhdx at 'destpath' that is ext4 -// formatted based on `opts`. -func (r *Runhcs) CreateScratchWithOpts(context context.Context, destpath string, opts *CreateScratchOpts) error { - args := []string{"create-scratch", "--destpath", destpath} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - return r.runOrError(r.command(context, args...)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go deleted file mode 100644 index f908de4e29..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go +++ /dev/null @@ -1,103 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - runc "github.com/containerd/go-runc" -) - -// CreateOpts is set of options that can be used with the Create command. -type CreateOpts struct { - runc.IO - // PidFile is the path to the file to write the process id to. - PidFile string - // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--shim-log) for the launched shim process. - ShimLog string - // VMLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--vm-log) for the launched VM shim process. - VMLog string - // VMConsole is the path to the pipe for the VM's console (e.g. \\.\pipe\debugpipe) - VMConsole string -} - -func (opt *CreateOpts) args() ([]string, error) { - var out []string - if opt.PidFile != "" { - abs, err := filepath.Abs(opt.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if opt.ShimLog != "" { - if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { - out = append(out, "--shim-log", opt.ShimLog) - } else { - abs, err := filepath.Abs(opt.ShimLog) - if err != nil { - return nil, err - } - out = append(out, "--shim-log", abs) - } - } - if opt.VMLog != "" { - if strings.HasPrefix(opt.VMLog, irunhcs.SafePipePrefix) { - out = append(out, "--vm-log", opt.VMLog) - } else { - abs, err := filepath.Abs(opt.VMLog) - if err != nil { - return nil, err - } - out = append(out, "--vm-log", abs) - } - } - if opt.VMConsole != "" { - out = append(out, "--vm-console", opt.VMConsole) - } - return out, nil -} - -// Create creates a new container and returns its pid if it was created -// successfully. -func (r *Runhcs) Create(context context.Context, id, bundle string, opts *CreateOpts) error { - args := []string{"create", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(runc.StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go deleted file mode 100644 index 307a1de5c1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" -) - -// DeleteOpts is set of options that can be used with the Delete command. -type DeleteOpts struct { - // Force forcibly deletes the container if it is still running (uses SIGKILL). - Force bool -} - -func (opt *DeleteOpts) args() ([]string, error) { - var out []string - if opt.Force { - out = append(out, "--force") - } - return out, nil -} - -// Delete any resources held by the container often used with detached -// containers. -func (r *Runhcs) Delete(context context.Context, id string, opts *DeleteOpts) error { - args := []string{"delete"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - return r.runOrError(r.command(context, append(args, id)...)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go deleted file mode 100644 index a85ee66f7a..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/containerd/go-runc" -) - -// ExecOpts is set of options that can be used with the Exec command. -type ExecOpts struct { - runc.IO - // Detach from the container's process. - Detach bool - // PidFile is the path to the file to write the process id to. - PidFile string - // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs---log) for the launched shim process. - ShimLog string -} - -func (opt *ExecOpts) args() ([]string, error) { - var out []string - if opt.Detach { - out = append(out, "--detach") - } - if opt.PidFile != "" { - abs, err := filepath.Abs(opt.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if opt.ShimLog != "" { - if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { - out = append(out, "--shim-log", opt.ShimLog) - } else { - abs, err := filepath.Abs(opt.ShimLog) - if err != nil { - return nil, err - } - out = append(out, "--shim-log", abs) - } - } - return out, nil -} - -// Exec executes an additional process inside the container based on the -// oci.Process spec found at processFile. 
-func (r *Runhcs) Exec(context context.Context, id, processFile string, opts *ExecOpts) error { - args := []string{"exec", "--process", processFile} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) - if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil - } - ec, err := runc.Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(runc.StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := runc.Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go deleted file mode 100644 index 8480c64929..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" -) - -// Kill sends the specified signal (default: SIGTERM) to the container's init -// process. -func (r *Runhcs) Kill(context context.Context, id, signal string) error { - return r.runOrError(r.command(context, "kill", id, signal)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go deleted file mode 100644 index d7e88a2f05..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "encoding/json" - - irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" -) - -// ContainerState is the representation of the containers state at the moment of -// query. -type ContainerState = irunhcs.ContainerState - -// List containers started by runhcs. -// -// Note: This is specific to the Runhcs.Root namespace provided in the global -// settings. -func (r *Runhcs) List(context context.Context) ([]*ContainerState, error) { - data, err := cmdOutput(r.command(context, "list", "--format=json"), false) - if err != nil { - return nil, err - } - var out []*ContainerState - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return out, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go deleted file mode 100644 index 93ec1e8770..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" -) - -// Pause suspends all processes inside the container. 
-func (r *Runhcs) Pause(context context.Context, id string) error { - return r.runOrError(r.command(context, "pause", id)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go deleted file mode 100644 index b60dabe8f5..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "encoding/json" - "fmt" -) - -// Ps displays the processes running inside a container. -func (r *Runhcs) Ps(context context.Context, id string) ([]int, error) { - data, err := cmdOutput(r.command(context, "ps", "--format=json", id), true) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data) - } - var out []int - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return out, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go deleted file mode 100644 index 016b948056..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "strconv" -) - -// ResizeTTYOpts is set of options that can be used with the ResizeTTY command. -type ResizeTTYOpts struct { - // Pid is the process pid (defaults to init pid). - Pid *int -} - -func (opt *ResizeTTYOpts) args() ([]string, error) { - var out []string - if opt.Pid != nil { - out = append(out, "--pid", strconv.Itoa(*opt.Pid)) - } - return out, nil -} - -// ResizeTTY updates the terminal size for a container process. -func (r *Runhcs) ResizeTTY(context context.Context, id string, width, height uint16, opts *ResizeTTYOpts) error { - args := []string{"resize-tty"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - return r.runOrError(r.command(context, append(args, id, strconv.FormatUint(uint64(width), 10), strconv.FormatUint(uint64(height), 10))...)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go deleted file mode 100644 index 0116d0a2de..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" -) - -// Resume resumes all processes that have been previously paused. -func (r *Runhcs) Resume(context context.Context, id string) error { - return r.runOrError(r.command(context, "resume", id)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go deleted file mode 100644 index 98de529de7..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" -) - -// Start will start an already created container. 
-func (r *Runhcs) Start(context context.Context, id string) error { - return r.runOrError(r.command(context, "start", id)) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go deleted file mode 100644 index cc18801ec1..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build windows - -package runhcs - -import ( - "context" - "encoding/json" - "fmt" -) - -// State outputs the state of a container. -func (r *Runhcs) State(context context.Context, id string) (*ContainerState, error) { - data, err := cmdOutput(r.command(context, "state", id), true) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data) - } - var out ContainerState - if err := json.Unmarshal(data, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go deleted file mode 100644 index 0ec1aa05c4..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package ociwclayer provides functions for importing and exporting Windows -// container layers from and to their OCI tar representation. -package ociwclayer diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go deleted file mode 100644 index baa2dff3ee..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go +++ /dev/null @@ -1,86 +0,0 @@ -//go:build windows - -package ociwclayer - -import ( - "archive/tar" - "context" - "io" - "path/filepath" - - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -// ExportLayerToTar writes an OCI layer tar stream from the provided on-disk layer. -// The caller must specify the parent layers, if any, ordered from lowest to -// highest layer. -// -// The layer will be mounted for this process, so the caller should ensure that -// it is not currently mounted. -func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayerPaths []string) error { - err := wclayer.ActivateLayer(ctx, path) - if err != nil { - return err - } - defer func() { - _ = wclayer.DeactivateLayer(ctx, path) - }() - - // Prepare and unprepare the layer to ensure that it has been initialized. - err = wclayer.PrepareLayer(ctx, path, parentLayerPaths) - if err != nil { - return err - } - err = wclayer.UnprepareLayer(ctx, path) - if err != nil { - return err - } - - r, err := wclayer.NewLayerReader(ctx, path, parentLayerPaths) - if err != nil { - return err - } - - err = writeTarFromLayer(ctx, r, w) - cerr := r.Close() - if err != nil { - return err - } - return cerr -} - -func writeTarFromLayer(ctx context.Context, r wclayer.LayerReader, w io.Writer) error { - t := tar.NewWriter(w) - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - name, size, fileInfo, err := r.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if fileInfo == nil { - // Write a whiteout file. 
- hdr := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))), - } - err := t.WriteHeader(hdr) - if err != nil { - return err - } - } else { - err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) - if err != nil { - return err - } - } - } - return t.Close() -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go deleted file mode 100644 index c9fb6df276..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go +++ /dev/null @@ -1,150 +0,0 @@ -//go:build windows - -package ociwclayer - -import ( - "archive/tar" - "bufio" - "context" - "io" - "os" - "path" - "path/filepath" - "strings" - - winio "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -const whiteoutPrefix = ".wh." - -var ( - // mutatedFiles is a list of files that are mutated by the import process - // and must be backed up and restored. - mutatedFiles = map[string]string{ - "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", - } -) - -// ImportLayerFromTar reads a layer from an OCI layer tar stream and extracts it to the -// specified path. The caller must specify the parent layers, if any, ordered -// from lowest to highest layer. -// -// The caller must ensure that the thread or process has acquired backup and -// restore privileges. -// -// This function returns the total size of the layer's files, in bytes. -func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLayerPaths []string) (int64, error) { - err := os.MkdirAll(path, 0) - if err != nil { - return 0, err - } - w, err := wclayer.NewLayerWriter(ctx, path, parentLayerPaths) - if err != nil { - return 0, err - } - n, err := writeLayerFromTar(ctx, r, w, path) - cerr := w.Close() - if err != nil { - return 0, err - } - if cerr != nil { - return 0, cerr - } - return n, nil -} - -func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, root string) (int64, error) { - t := tar.NewReader(r) - hdr, err := t.Next() - totalSize := int64(0) - buf := bufio.NewWriter(nil) - for err == nil { - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - - base := path.Base(hdr.Name) - if strings.HasPrefix(base, whiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):]) - err = w.Remove(filepath.FromSlash(name)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else if hdr.Typeflag == tar.TypeLink { - err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else { - var ( - name string - size int64 - fileInfo *winio.FileBasicInfo - ) - name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) - if err != nil { - return 0, err - } - err = w.Add(filepath.FromSlash(name), fileInfo) - if err != nil { - return 0, err - } - hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) - totalSize += size - } - } - if err != io.EOF { - return 0, err - } - return totalSize, nil -} - -// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and -// writes it to a backup stream, and also saves any files that will be 
mutated -// by the import layer process to a backup location. -func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { - var bcdBackup *os.File - var bcdBackupWriter *winio.BackupFileWriter - if backupPath, ok := mutatedFiles[hdr.Name]; ok { - bcdBackup, err = os.Create(filepath.Join(root, backupPath)) - if err != nil { - return nil, err - } - defer func() { - cerr := bcdBackup.Close() - if err == nil { - err = cerr - } - }() - - bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) - defer func() { - cerr := bcdBackupWriter.Close() - if err == nil { - err = cerr - } - }() - - buf.Reset(io.MultiWriter(w, bcdBackupWriter)) - } else { - buf.Reset(w) - } - - defer func() { - ferr := buf.Flush() - if err == nil { - err = ferr - } - }() - - return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/octtrpc/interceptor.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/octtrpc/interceptor.go deleted file mode 100644 index 673b29b5a6..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/pkg/octtrpc/interceptor.go +++ /dev/null @@ -1,117 +0,0 @@ -package octtrpc - -import ( - "context" - "encoding/base64" - "strings" - - "github.com/containerd/ttrpc" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/Microsoft/hcsshim/internal/oc" -) - -type options struct { - sampler trace.Sampler -} - -// Option represents an option function that can be used with the OC TTRPC -// interceptors. -type Option func(*options) - -// WithSampler returns an option function to set the OC sampler used for the -// auto-created spans. -func WithSampler(sampler trace.Sampler) Option { - return func(opts *options) { - opts.sampler = sampler - } -} - -const metadataTraceContextKey = "octtrpc.tracecontext" - -func convertMethodName(name string) string { - name = strings.TrimPrefix(name, "/") - name = strings.Replace(name, "/", ".", -1) - return name -} - -func getParentSpanFromContext(ctx context.Context) (trace.SpanContext, bool) { - md, _ := ttrpc.GetMetadata(ctx) - traceContext := md[metadataTraceContextKey] - if len(traceContext) > 0 { - traceContextBinary, _ := base64.StdEncoding.DecodeString(traceContext[0]) - return propagation.FromBinary(traceContextBinary) - } - return trace.SpanContext{}, false -} - -func setSpanStatus(span *trace.Span, err error) { - // This error handling matches that used in ocgrpc. - if err != nil { - s, ok := status.FromError(err) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: err.Error()}) - } - } -} - -// ClientInterceptor returns a TTRPC unary client interceptor that automatically -// creates a new span for outgoing TTRPC calls, and passes the span context as -// metadata on the call. 
-func ClientInterceptor(opts ...Option) ttrpc.UnaryClientInterceptor { - o := options{ - sampler: oc.DefaultSampler, - } - for _, opt := range opts { - opt(&o) - } - return func(ctx context.Context, req *ttrpc.Request, resp *ttrpc.Response, info *ttrpc.UnaryClientInfo, inv ttrpc.Invoker) (err error) { - ctx, span := oc.StartSpan( - ctx, - convertMethodName(info.FullMethod), - trace.WithSampler(o.sampler), - oc.WithClientSpanKind) - defer span.End() - defer setSpanStatus(span, err) - - spanContextBinary := propagation.Binary(span.SpanContext()) - b64 := base64.StdEncoding.EncodeToString(spanContextBinary) - kvp := &ttrpc.KeyValue{Key: metadataTraceContextKey, Value: b64} - req.Metadata = append(req.Metadata, kvp) - - return inv(ctx, req, resp) - } -} - -// ServerInterceptor returns a TTRPC unary server interceptor that automatically -// creates a new span for incoming TTRPC calls, and parents the span to the -// span context received via metadata, if it exists. -func ServerInterceptor(opts ...Option) ttrpc.UnaryServerInterceptor { - o := options{ - sampler: oc.DefaultSampler, - } - for _, opt := range opts { - opt(&o) - } - return func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (_ interface{}, err error) { - name := convertMethodName(info.FullMethod) - - var span *trace.Span - opts := []trace.StartOption{trace.WithSampler(o.sampler), oc.WithServerSpanKind} - parent, ok := getParentSpanFromContext(ctx) - if ok { - ctx, span = oc.StartSpanWithRemoteParent(ctx, name, parent, opts...) - } else { - ctx, span = oc.StartSpan(ctx, name, opts...) - } - defer span.End() - defer setSpanStatus(span, err) - - return method(ctx, unmarshal) - } -} diff --git a/test/vendor/github.com/Microsoft/hcsshim/process.go b/test/vendor/github.com/Microsoft/hcsshim/process.go deleted file mode 100644 index 44df91cde2..0000000000 --- a/test/vendor/github.com/Microsoft/hcsshim/process.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build windows - -package hcsshim - -import ( - "context" - "io" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" -) - -// ContainerError is an error encountered in HCS -type process struct { - p *hcs.Process - waitOnce sync.Once - waitCh chan struct{} - waitErr error -} - -// Pid returns the process ID of the process within the container. -func (process *process) Pid() int { - return process.p.Pid() -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *process) Kill() error { - found, err := process.p.Kill(context.Background()) - if err != nil { - return convertProcessError(err, process) - } - if !found { - return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} - } - return nil -} - -// Wait waits for the process to exit. -func (process *process) Wait() error { - return convertProcessError(process.p.Wait(), process) -} - -// WaitTimeout waits for the process to exit or the duration to elapse. It returns -// false if timeout occurs. -func (process *process) WaitTimeout(timeout time.Duration) error { - process.waitOnce.Do(func() { - process.waitCh = make(chan struct{}) - go func() { - process.waitErr = process.Wait() - close(process.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} - case <-process.waitCh: - return process.waitErr - } -} - -// ExitCode returns the exit code of the process. 
The process must have -// already terminated. -func (process *process) ExitCode() (int, error) { - code, err := process.p.ExitCode() - if err != nil { - err = convertProcessError(err, process) - } - return code, err -} - -// ResizeConsole resizes the console of the process. -func (process *process) ResizeConsole(width, height uint16) error { - return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes; it should be possible to -// call this multiple times to get multiple interfaces. -func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { - stdin, stdout, stderr, err := process.p.StdioLegacy() - if err != nil { - err = convertProcessError(err, process) - } - return stdin, stdout, stderr, err -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. -func (process *process) CloseStdin() error { - return convertProcessError(process.p.CloseStdin(context.Background()), process) -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. -func (process *process) Close() error { - return convertProcessError(process.p.Close(), process) -} diff --git a/test/vendor/github.com/containerd/containerd/.golangci.yml b/test/vendor/github.com/containerd/containerd/.golangci.yml deleted file mode 100644 index 4eba7d8d19..0000000000 --- a/test/vendor/github.com/containerd/containerd/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters: - enable: - - structcheck - - varcheck - - staticcheck - - unconvert - - gofmt - - goimports - - revive - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -issues: - include: - - EXC0002 - -run: - timeout: 3m - skip-dirs: - - api - - design - - docs - - docs/man diff --git a/test/vendor/github.com/containerd/containerd/Vagrantfile b/test/vendor/github.com/containerd/containerd/Vagrantfile deleted file mode 100644 index e294fe111f..0000000000 --- a/test/vendor/github.com/containerd/containerd/Vagrantfile +++ /dev/null @@ -1,260 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Copyright The containerd Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Vagrantfile for cgroup2 and SELinux -Vagrant.configure("2") do |config| - config.vm.box = "fedora/34-cloud-base" - memory = 4096 - cpus = 2 - config.vm.provider :virtualbox do |v| - v.memory = memory - v.cpus = cpus - end - config.vm.provider :libvirt do |v| - v.memory = memory - v.cpus = cpus - end - - # Disabled by default. 
To run: - # vagrant up --provision-with=upgrade-packages - # To upgrade only specific packages: - # UPGRADE_PACKAGES=selinux vagrant up --provision-with=upgrade-packages - # - config.vm.provision "upgrade-packages", type: "shell", run: "never" do |sh| - sh.upload_path = "/tmp/vagrant-upgrade-packages" - sh.env = { - 'UPGRADE_PACKAGES': ENV['UPGRADE_PACKAGES'], - } - sh.inline = <<~SHELL - #!/usr/bin/env bash - set -eux -o pipefail - dnf -y upgrade ${UPGRADE_PACKAGES} - SHELL - end - - # To re-run, installing CNI from RPM: - # INSTALL_PACKAGES="containernetworking-plugins" vagrant up --provision-with=install-packages - # - config.vm.provision "install-packages", type: "shell", run: "once" do |sh| - sh.upload_path = "/tmp/vagrant-install-packages" - sh.env = { - 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'], - } - sh.inline = <<~SHELL - #!/usr/bin/env bash - set -eux -o pipefail - dnf -y install \ - container-selinux \ - curl \ - gcc \ - git \ - iptables \ - libseccomp-devel \ - libselinux-devel \ - lsof \ - make \ - ${INSTALL_PACKAGES} - SHELL - end - - # To re-run this provisioner, installing a different version of go: - # GO_VERSION="1.14.6" vagrant up --provision-with=install-golang - # - config.vm.provision "install-golang", type: "shell", run: "once" do |sh| - sh.upload_path = "/tmp/vagrant-install-golang" - sh.env = { - 'GO_VERSION': ENV['GO_VERSION'] || "1.16.14", - } - sh.inline = <<~SHELL - #!/usr/bin/env bash - set -eux -o pipefail - curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local - cat >> /etc/environment <> /etc/profile.d/sh.local < /tmp/containerd.log - systemctl stop containerd - } - selinux=$(getenforce) - if [[ $selinux == Enforcing ]]; then - setenforce 0 - fi - systemctl enable --now ${GOPATH}/src/github.com/containerd/containerd/containerd.service - if [[ $selinux == Enforcing ]]; then - setenforce 1 - fi - trap cleanup EXIT - ctr version - critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true' - SHELL - end - -end diff --git a/test/vendor/github.com/containerd/containerd/oci/spec_opts.go b/test/vendor/github.com/containerd/containerd/oci/spec_opts.go deleted file mode 100644 index 4199a85d93..0000000000 --- a/test/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ /dev/null @@ -1,1292 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package oci - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" - "github.com/containerd/continuity/fs" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/runc/libcontainer/user" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// SpecOpts sets spec specific information to a newly generated OCI spec -type SpecOpts func(context.Context, Client, *containers.Container, *Spec) error - -// Compose converts a sequence of spec operations into a single operation -func Compose(opts ...SpecOpts) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { - for _, o := range opts { - if err := o(ctx, client, c, s); err != nil { - return err - } - } - return nil - } -} - -// setProcess sets Process to empty if unset -func setProcess(s *Spec) { - if s.Process == nil { - s.Process = &specs.Process{} - } -} - -// setRoot sets Root to empty if unset -func setRoot(s *Spec) { - if s.Root == nil { - s.Root = &specs.Root{} - } -} - -// setLinux sets Linux to empty if unset -func setLinux(s *Spec) { - if s.Linux == nil { - s.Linux = &specs.Linux{} - } -} - -// nolint -func setResources(s *Spec) { - if s.Linux != nil { - if s.Linux.Resources == nil { - s.Linux.Resources = &specs.LinuxResources{} - } - } - if s.Windows != nil { - if s.Windows.Resources == nil { - s.Windows.Resources = &specs.WindowsResources{} - } - } -} - -// nolint -func setCPU(s *Spec) { - setResources(s) - if s.Linux != nil { - if s.Linux.Resources.CPU == nil { - s.Linux.Resources.CPU = &specs.LinuxCPU{} - } - } - if s.Windows != nil { - if s.Windows.Resources.CPU == nil { - s.Windows.Resources.CPU = &specs.WindowsCPUResources{} - } - } -} - -// setCapabilities sets Linux Capabilities to empty if unset -func setCapabilities(s *Spec) { - setProcess(s) - if s.Process.Capabilities == nil { - s.Process.Capabilities = &specs.LinuxCapabilities{} - } -} - -// WithDefaultSpec returns a SpecOpts that will populate the spec with default -// values. -// -// Use as the first option to clear the spec, then apply options afterwards. -func WithDefaultSpec() SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return generateDefaultSpecWithPlatform(ctx, platforms.DefaultString(), c.ID, s) - } -} - -// WithDefaultSpecForPlatform returns a SpecOpts that will populate the spec -// with default values for a given platform. -// -// Use as the first option to clear the spec, then apply options afterwards. -func WithDefaultSpecForPlatform(platform string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return generateDefaultSpecWithPlatform(ctx, platform, c.ID, s) - } -} - -// WithSpecFromBytes loads the spec from the provided byte slice. -func WithSpecFromBytes(p []byte) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - *s = Spec{} // make sure spec is cleared. 
- if err := json.Unmarshal(p, s); err != nil { - return errors.Wrapf(err, "decoding spec config file failed, current supported OCI runtime-spec : v%s", specs.Version) - } - return nil - } -} - -// WithSpecFromFile loads the specification from the provided filename. -func WithSpecFromFile(filename string) SpecOpts { - return func(ctx context.Context, c Client, container *containers.Container, s *Spec) error { - p, err := ioutil.ReadFile(filename) - if err != nil { - return errors.Wrap(err, "cannot load spec config file") - } - return WithSpecFromBytes(p)(ctx, c, container, s) - } -} - -// WithEnv appends environment variables -func WithEnv(environmentVariables []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if len(environmentVariables) > 0 { - setProcess(s) - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables) - } - return nil - } -} - -// WithDefaultPathEnv sets the $PATH environment variable to the -// default PATH defined in this package. -func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv) - return nil -} - -// replaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func replaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - results := make([]string, 0, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - results = append(results, e) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - results[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - results[i] = value - } else { - results = append(results, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(results); i++ { - if results[i] == "" { - results = append(results[:i], results[i+1:]...) 
- i-- - } - } - - return results -} - -// WithProcessArgs replaces the args on the generated spec -func WithProcessArgs(args ...string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.Args = args - return nil - } -} - -// WithProcessCwd replaces the current working directory on the generated spec -func WithProcessCwd(cwd string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.Cwd = cwd - return nil - } -} - -// WithTTY sets the information on the spec as well as the environment variables for -// using a TTY -func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.Terminal = true - if s.Linux != nil { - s.Process.Env = append(s.Process.Env, "TERM=xterm") - } - - return nil -} - -// WithTTYSize sets the information on the spec as well as the environment variables for -// using a TTY -func WithTTYSize(width, height int) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - if s.Process.ConsoleSize == nil { - s.Process.ConsoleSize = &specs.Box{} - } - s.Process.ConsoleSize.Width = uint(width) - s.Process.ConsoleSize.Height = uint(height) - return nil - } -} - -// WithHostname sets the container's hostname -func WithHostname(name string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Hostname = name - return nil - } -} - -// WithMounts appends mounts -func WithMounts(mounts []specs.Mount) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Mounts = append(s.Mounts, mounts...) - return nil - } -} - -// WithoutMounts removes mounts -func WithoutMounts(dests ...string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - var ( - mounts []specs.Mount - current = s.Mounts - ) - mLoop: - for _, m := range current { - mDestination := filepath.Clean(m.Destination) - for _, dest := range dests { - if mDestination == dest { - continue mLoop - } - } - mounts = append(mounts, m) - } - s.Mounts = mounts - return nil - } -} - -// WithHostNamespace allows a task to run inside the host's linux namespace -func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - for i, n := range s.Linux.Namespaces { - if n.Type == ns { - s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) - return nil - } - } - return nil - } -} - -// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the -// spec, the existing namespace is replaced by the one provided. 
-func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - for i, n := range s.Linux.Namespaces { - if n.Type == ns.Type { - s.Linux.Namespaces[i] = ns - return nil - } - } - s.Linux.Namespaces = append(s.Linux.Namespaces, ns) - return nil - } -} - -// WithNewPrivileges turns off the NoNewPrivileges feature flag in the spec -func WithNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.NoNewPrivileges = false - - return nil -} - -// WithImageConfig configures the spec to from the configuration of an Image -func WithImageConfig(image Image) SpecOpts { - return WithImageConfigArgs(image, nil) -} - -// WithImageConfigArgs configures the spec to from the configuration of an Image with additional args that -// replaces the CMD of the image -func WithImageConfigArgs(image Image, args []string) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { - ic, err := image.Config(ctx) - if err != nil { - return err - } - var ( - ociimage v1.Image - config v1.ImageConfig - ) - switch ic.MediaType { - case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config: - p, err := content.ReadBlob(ctx, image.ContentStore(), ic) - if err != nil { - return err - } - - if err := json.Unmarshal(p, &ociimage); err != nil { - return err - } - config = ociimage.Config - default: - return fmt.Errorf("unknown image config media type %s", ic.MediaType) - } - - setProcess(s) - if s.Linux != nil { - defaults := config.Env - if len(defaults) == 0 { - defaults = defaultUnixEnv - } - s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env) - cmd := config.Cmd - if len(args) > 0 { - cmd = args - } - s.Process.Args = append(config.Entrypoint, cmd...) - - cwd := config.WorkingDir - if cwd == "" { - cwd = "/" - } - s.Process.Cwd = cwd - if config.User != "" { - if err := WithUser(config.User)(ctx, client, c, s); err != nil { - return err - } - return WithAdditionalGIDs(fmt.Sprintf("%d", s.Process.User.UID))(ctx, client, c, s) - } - // we should query the image's /etc/group for additional GIDs - // even if there is no specified user in the image config - return WithAdditionalGIDs("root")(ctx, client, c, s) - } else if s.Windows != nil { - s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) - cmd := config.Cmd - if len(args) > 0 { - cmd = args - } - s.Process.Args = append(config.Entrypoint, cmd...) - - s.Process.Cwd = config.WorkingDir - s.Process.User = specs.User{ - Username: config.User, - } - } else { - return errors.New("spec does not contain Linux or Windows section") - } - return nil - } -} - -// WithRootFSPath specifies unmanaged rootfs path. 
-func WithRootFSPath(path string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setRoot(s) - s.Root.Path = path - // Entrypoint is not set here (it's up to caller) - return nil - } -} - -// WithRootFSReadonly sets specs.Root.Readonly to true -func WithRootFSReadonly() SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setRoot(s) - s.Root.Readonly = true - return nil - } -} - -// WithNoNewPrivileges sets no_new_privileges on the process for the container -func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.NoNewPrivileges = true - return nil -} - -// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly -func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/etc/hosts", - Type: "bind", - Source: "/etc/hosts", - Options: []string{"rbind", "ro"}, - }) - return nil -} - -// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly -func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/etc/resolv.conf", - Type: "bind", - Source: "/etc/resolv.conf", - Options: []string{"rbind", "ro"}, - }) - return nil -} - -// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly -func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/etc/localtime", - Type: "bind", - Source: "/etc/localtime", - Options: []string{"rbind", "ro"}, - }) - return nil -} - -// WithUserNamespace sets the uid and gid mappings for the task -// this can be called multiple times to add more mappings to the generated spec -func WithUserNamespace(uidMap, gidMap []specs.LinuxIDMapping) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - var hasUserns bool - setLinux(s) - for _, ns := range s.Linux.Namespaces { - if ns.Type == specs.UserNamespace { - hasUserns = true - break - } - } - if !hasUserns { - s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{ - Type: specs.UserNamespace, - }) - } - s.Linux.UIDMappings = append(s.Linux.UIDMappings, uidMap...) - s.Linux.GIDMappings = append(s.Linux.GIDMappings, gidMap...) - return nil - } -} - -// WithCgroup sets the container's cgroup path -func WithCgroup(path string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - s.Linux.CgroupsPath = path - return nil - } -} - -// WithNamespacedCgroup uses the namespace set on the context to create a -// root directory for containers in the cgroup with the id as the subcgroup -func WithNamespacedCgroup() SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - namespace, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return err - } - setLinux(s) - s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID) - return nil - } -} - -// WithUser sets the user to be used within the container. 
-// It accepts a valid user string in OCI Image Spec v1.0.0: -// user, uid, user:group, uid:gid, uid:group, user:gid -func WithUser(userstr string) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { - setProcess(s) - parts := strings.Split(userstr, ":") - switch len(parts) { - case 1: - v, err := strconv.Atoi(parts[0]) - if err != nil { - // if we cannot parse as a uint they try to see if it is a username - return WithUsername(userstr)(ctx, client, c, s) - } - return WithUserID(uint32(v))(ctx, client, c, s) - case 2: - var ( - username string - groupname string - ) - var uid, gid uint32 - v, err := strconv.Atoi(parts[0]) - if err != nil { - username = parts[0] - } else { - uid = uint32(v) - } - if v, err = strconv.Atoi(parts[1]); err != nil { - groupname = parts[1] - } else { - gid = uint32(v) - } - if username == "" && groupname == "" { - s.Process.User.UID, s.Process.User.GID = uid, gid - return nil - } - f := func(root string) error { - if username != "" { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Name == username - }) - if err != nil { - return err - } - uid = uint32(user.Uid) - } - if groupname != "" { - gid, err = GIDFromPath(root, func(g user.Group) bool { - return g.Name == groupname - }) - if err != nil { - return err - } - } - s.Process.User.UID, s.Process.User.GID = uid, gid - return nil - } - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.New("rootfs absolute path is required") - } - return f(s.Root.Path) - } - if c.Snapshotter == "" { - return errors.New("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.New("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, f) - default: - return fmt.Errorf("invalid USER value %s", userstr) - } - } -} - -// WithUIDGID allows the UID and GID for the Process to be set -func WithUIDGID(uid, gid uint32) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.User.UID = uid - s.Process.User.GID = gid - return nil - } -} - -// WithUserID sets the correct UID and GID for the container based -// on the image's /etc/passwd contents. If /etc/passwd does not exist, -// or uid is not found in /etc/passwd, it sets the requested uid, -// additionally sets the gid to 0, and does not return an error. 
-func WithUserID(uid uint32) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { - setProcess(s) - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { - return u.Uid == int(uid) - }) - if err != nil { - if os.IsNotExist(err) || err == ErrNoUsersFound { - s.Process.User.UID, s.Process.User.GID = uid, 0 - return nil - } - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - - } - if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, func(root string) error { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Uid == int(uid) - }) - if err != nil { - if os.IsNotExist(err) || err == ErrNoUsersFound { - s.Process.User.UID, s.Process.User.GID = uid, 0 - return nil - } - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - }) - } -} - -// WithUsername sets the correct UID and GID for the container -// based on the image's /etc/passwd contents. If /etc/passwd -// does not exist, or the username is not found in /etc/passwd, -// it returns error. -func WithUsername(username string) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { - setProcess(s) - if s.Linux != nil { - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { - return u.Name == username - }) - if err != nil { - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - } - if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, func(root string) error { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Name == username - }) - if err != nil { - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - }) - } else if s.Windows != nil { - s.Process.User.Username = username - } else { - return errors.New("spec does not contain Linux or Windows section") - } - return nil - } -} - -// WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed -// for a particular user in the /etc/groups file of the image's root filesystem -// The passed in user can be either a uid or a username. 
-func WithAdditionalGIDs(userstr string) SpecOpts { - return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { - // For LCOW additional GID's not supported - if s.Windows != nil { - return nil - } - setProcess(s) - setAdditionalGids := func(root string) error { - var username string - uid, err := strconv.Atoi(userstr) - if err == nil { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Uid == uid - }) - if err != nil { - if os.IsNotExist(err) || err == ErrNoUsersFound { - return nil - } - return err - } - username = user.Name - } else { - username = userstr - } - gids, err := getSupplementalGroupsFromPath(root, func(g user.Group) bool { - // we only want supplemental groups - if g.Name == username { - return false - } - for _, entry := range g.List { - if entry == username { - return true - } - } - return false - }) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - s.Process.User.AdditionalGids = gids - return nil - } - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.Errorf("rootfs absolute path is required") - } - return setAdditionalGids(s.Root.Path) - } - if c.Snapshotter == "" { - return errors.Errorf("no snapshotter set for container") - } - if c.SnapshotKey == "" { - return errors.Errorf("rootfs snapshot not created for container") - } - snapshotter := client.SnapshotService(c.Snapshotter) - mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) - if err != nil { - return err - } - - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, setAdditionalGids) - } -} - -// WithCapabilities sets Linux capabilities on the process -func WithCapabilities(caps []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setCapabilities(s) - - s.Process.Capabilities.Bounding = caps - s.Process.Capabilities.Effective = caps - s.Process.Capabilities.Permitted = caps - s.Process.Capabilities.Inheritable = caps - - return nil - } -} - -func capsContain(caps []string, s string) bool { - for _, c := range caps { - if c == s { - return true - } - } - return false -} - -func removeCap(caps *[]string, s string) { - var newcaps []string - for _, c := range *caps { - if c == s { - continue - } - newcaps = append(newcaps, c) - } - *caps = newcaps -} - -// WithAddedCapabilities adds the provided capabilities -func WithAddedCapabilities(caps []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setCapabilities(s) - for _, c := range caps { - for _, cl := range []*[]string{ - &s.Process.Capabilities.Bounding, - &s.Process.Capabilities.Effective, - &s.Process.Capabilities.Permitted, - &s.Process.Capabilities.Inheritable, - } { - if !capsContain(*cl, c) { - *cl = append(*cl, c) - } - } - } - return nil - } -} - -// WithDroppedCapabilities removes the provided capabilities -func WithDroppedCapabilities(caps []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setCapabilities(s) - for _, c := range caps { - for _, cl := range []*[]string{ - &s.Process.Capabilities.Bounding, - &s.Process.Capabilities.Effective, - &s.Process.Capabilities.Permitted, - &s.Process.Capabilities.Inheritable, - } { - removeCap(cl, c) - } - } - return nil - } -} - -// WithAmbientCapabilities set the Linux ambient capabilities for the process -// Ambient capabilities should only be set for non-root users or the caller should -// 
understand how these capabilities are used and set -func WithAmbientCapabilities(caps []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setCapabilities(s) - - s.Process.Capabilities.Ambient = caps - return nil - } -} - -// ErrNoUsersFound can be returned from UserFromPath -var ErrNoUsersFound = errors.New("no users found") - -// UserFromPath inspects the user object using /etc/passwd in the specified rootfs. -// filter can be nil. -func UserFromPath(root string, filter func(user.User) bool) (user.User, error) { - ppath, err := fs.RootPath(root, "/etc/passwd") - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswdFileFilter(ppath, filter) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, ErrNoUsersFound - } - return users[0], nil -} - -// ErrNoGroupsFound can be returned from GIDFromPath -var ErrNoGroupsFound = errors.New("no groups found") - -// GIDFromPath inspects the GID using /etc/passwd in the specified rootfs. -// filter can be nil. -func GIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) { - gpath, err := fs.RootPath(root, "/etc/group") - if err != nil { - return 0, err - } - groups, err := user.ParseGroupFileFilter(gpath, filter) - if err != nil { - return 0, err - } - if len(groups) == 0 { - return 0, ErrNoGroupsFound - } - g := groups[0] - return uint32(g.Gid), nil -} - -func getSupplementalGroupsFromPath(root string, filter func(user.Group) bool) ([]uint32, error) { - gpath, err := fs.RootPath(root, "/etc/group") - if err != nil { - return []uint32{}, err - } - groups, err := user.ParseGroupFileFilter(gpath, filter) - if err != nil { - return []uint32{}, err - } - if len(groups) == 0 { - // if there are no additional groups; just return an empty set - return []uint32{}, nil - } - addlGids := []uint32{} - for _, grp := range groups { - addlGids = append(addlGids, uint32(grp.Gid)) - } - return addlGids, nil -} - -func isRootfsAbs(root string) bool { - return filepath.IsAbs(root) -} - -// WithMaskedPaths sets the masked paths option -func WithMaskedPaths(paths []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - s.Linux.MaskedPaths = paths - return nil - } -} - -// WithReadonlyPaths sets the read only paths option -func WithReadonlyPaths(paths []string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - s.Linux.ReadonlyPaths = paths - return nil - } -} - -// WithWriteableSysfs makes any sysfs mounts writeable -func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - for _, m := range s.Mounts { - if m.Type == "sysfs" { - for i, o := range m.Options { - if o == "ro" { - m.Options[i] = "rw" - } - } - } - } - return nil -} - -// WithWriteableCgroupfs makes any cgroup mounts writeable -func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - for _, m := range s.Mounts { - if m.Type == "cgroup" { - for i, o := range m.Options { - if o == "ro" { - m.Options[i] = "rw" - } - } - } - } - return nil -} - -// WithSelinuxLabel sets the process SELinux label -func WithSelinuxLabel(label string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.SelinuxLabel = label - return nil - } -} - -// WithApparmorProfile sets the Apparmor profile for the process -func 
WithApparmorProfile(profile string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setProcess(s) - s.Process.ApparmorProfile = profile - return nil - } -} - -// WithSeccompUnconfined clears the seccomp profile -func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - s.Linux.Seccomp = nil - return nil -} - -// WithParentCgroupDevices uses the default cgroup setup to inherit the container's parent cgroup's -// allowed and denied devices -func WithParentCgroupDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - if s.Linux.Resources == nil { - s.Linux.Resources = &specs.LinuxResources{} - } - s.Linux.Resources.Devices = nil - return nil -} - -// WithAllDevicesAllowed permits READ WRITE MKNOD on all devices nodes for the container -func WithAllDevicesAllowed(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - if s.Linux.Resources == nil { - s.Linux.Resources = &specs.LinuxResources{} - } - s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{ - { - Allow: true, - Access: rwm, - }, - } - return nil -} - -// WithDefaultUnixDevices adds the default devices for unix such as /dev/null, /dev/random to -// the container's resource cgroup spec -func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - if s.Linux.Resources == nil { - s.Linux.Resources = &specs.LinuxResources{} - } - intptr := func(i int64) *int64 { - return &i - } - s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, []specs.LinuxDeviceCgroup{ - { - // "/dev/null", - Type: "c", - Major: intptr(1), - Minor: intptr(3), - Access: rwm, - Allow: true, - }, - { - // "/dev/random", - Type: "c", - Major: intptr(1), - Minor: intptr(8), - Access: rwm, - Allow: true, - }, - { - // "/dev/full", - Type: "c", - Major: intptr(1), - Minor: intptr(7), - Access: rwm, - Allow: true, - }, - { - // "/dev/tty", - Type: "c", - Major: intptr(5), - Minor: intptr(0), - Access: rwm, - Allow: true, - }, - { - // "/dev/zero", - Type: "c", - Major: intptr(1), - Minor: intptr(5), - Access: rwm, - Allow: true, - }, - { - // "/dev/urandom", - Type: "c", - Major: intptr(1), - Minor: intptr(9), - Access: rwm, - Allow: true, - }, - { - // "/dev/console", - Type: "c", - Major: intptr(5), - Minor: intptr(1), - Access: rwm, - Allow: true, - }, - // /dev/pts/ - pts namespaces are "coming soon" - { - Type: "c", - Major: intptr(136), - Access: rwm, - Allow: true, - }, - { - Type: "c", - Major: intptr(5), - Minor: intptr(2), - Access: rwm, - Allow: true, - }, - { - // tuntap - Type: "c", - Major: intptr(10), - Minor: intptr(200), - Access: rwm, - Allow: true, - }, - }...) - return nil -} - -// WithPrivileged sets up options for a privileged container -var WithPrivileged = Compose( - WithAllCurrentCapabilities, - WithMaskedPaths(nil), - WithReadonlyPaths(nil), - WithWriteableSysfs, - WithWriteableCgroupfs, - WithSelinuxLabel(""), - WithApparmorProfile(""), - WithSeccompUnconfined, -) - -// WithWindowsHyperV sets the Windows.HyperV section for HyperV isolation of containers. 
-func WithWindowsHyperV(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - if s.Windows.HyperV == nil { - s.Windows.HyperV = &specs.WindowsHyperV{} - } - return nil -} - -// WithMemoryLimit sets the `Linux.LinuxResources.Memory.Limit` section to the -// `limit` specified if the `Linux` section is not `nil`. Additionally sets the -// `Windows.WindowsResources.Memory.Limit` section if the `Windows` section is -// not `nil`. -func WithMemoryLimit(limit uint64) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Linux != nil { - if s.Linux.Resources == nil { - s.Linux.Resources = &specs.LinuxResources{} - } - if s.Linux.Resources.Memory == nil { - s.Linux.Resources.Memory = &specs.LinuxMemory{} - } - l := int64(limit) - s.Linux.Resources.Memory.Limit = &l - } - if s.Windows != nil { - if s.Windows.Resources == nil { - s.Windows.Resources = &specs.WindowsResources{} - } - if s.Windows.Resources.Memory == nil { - s.Windows.Resources.Memory = &specs.WindowsMemoryResources{} - } - s.Windows.Resources.Memory.Limit = &limit - } - return nil - } -} - -// WithAnnotations appends or replaces the annotations on the spec with the -// provided annotations -func WithAnnotations(annotations map[string]string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Annotations == nil { - s.Annotations = make(map[string]string) - } - for k, v := range annotations { - s.Annotations[k] = v - } - return nil - } -} - -// WithLinuxDevices adds the provided linux devices to the spec -func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - s.Linux.Devices = append(s.Linux.Devices, devices...) - return nil - } -} - -// WithLinuxDevice adds the device specified by path to the spec -func WithLinuxDevice(path, permissions string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - setLinux(s) - setResources(s) - - dev, err := deviceFromPath(path) - if err != nil { - return err - } - - s.Linux.Devices = append(s.Linux.Devices, *dev) - - s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ - Type: dev.Type, - Allow: true, - Major: &dev.Major, - Minor: &dev.Minor, - Access: permissions, - }) - - return nil - } -} - -// WithEnvFile adds environment variables from a file to the container's spec -func WithEnvFile(path string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - var vars []string - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - vars = append(vars, sc.Text()) - } - if err = sc.Err(); err != nil { - return err - } - return WithEnv(vars)(nil, nil, nil, s) - } -} - -// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config -// and an Opts was trying to set a configuration value on the mount. -var ErrNoShmMount = errors.New("no /dev/shm mount specified") - -// WithDevShmSize sets the size of the /dev/shm mount for the container. -// -// The size value is specified in kb, kilobytes. 
-func WithDevShmSize(kb int64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - for _, m := range s.Mounts { - if m.Source == "shm" && m.Type == "tmpfs" { - for i, o := range m.Options { - if strings.HasPrefix(o, "size=") { - m.Options[i] = fmt.Sprintf("size=%dk", kb) - return nil - } - } - m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) - return nil - } - } - return ErrNoShmMount - } -} - -// tryReadonlyMounts is used by the options which are trying to get user/group -// information from container's rootfs. Since the option does read operation -// only, this helper will append ReadOnly mount option to prevent linux kernel -// from syncing whole filesystem in umount syscall. -// -// TODO(fuweid): -// -// Currently, it only works for overlayfs. I think we can apply it to other -// kinds of filesystem. Maybe we can return `ro` option by `snapshotter.Mount` -// API, when the caller passes that experimental annotation -// `containerd.io/snapshot/readonly.mount` something like that. -func tryReadonlyMounts(mounts []mount.Mount) []mount.Mount { - if len(mounts) == 1 && mounts[0].Type == "overlay" { - mounts[0].Options = append(mounts[0].Options, "ro") - } - return mounts -} diff --git a/test/vendor/github.com/containerd/containerd/version/version.go b/test/vendor/github.com/containerd/containerd/version/version.go deleted file mode 100644 index 73857aef8d..0000000000 --- a/test/vendor/github.com/containerd/containerd/version/version.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package version - -import "runtime" - -var ( - // Package is filled at linking time - Package = "github.com/containerd/containerd" - - // Version holds the complete version number. Filled in at linking time. - Version = "1.5.10+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" - - // GoVersion is Go tree's version. - GoVersion = runtime.Version() -) diff --git a/test/vendor/github.com/docker/cli/AUTHORS b/test/vendor/github.com/docker/cli/AUTHORS deleted file mode 100644 index ecb6251ba0..0000000000 --- a/test/vendor/github.com/docker/cli/AUTHORS +++ /dev/null @@ -1,723 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. - -Aanand Prasad -Aaron L. 
Xu -Aaron Lehmann -Aaron.L.Xu -Abdur Rehman -Abhinandan Prativadi -Abin Shahab -Ace Tang -Addam Hardy -Adolfo Ochagavía -Adrian Plata -Adrien Duermael -Adrien Folie -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Akihiro Suda -Akim Demaille -Alan Thompson -Albert Callarisa -Aleksa Sarai -Alessandro Boch -Alex Mavrogiannis -Alex Mayer -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Ryabov -Alexandre González -Alfred Landrum -Alicia Lauerman -Allen Sun -Alvin Deng -Amen Belayneh -Amir Goldstein -Amit Krishnan -Amit Shukla -Amy Lindburg -Anda Xu -Andrea Luzzardi -Andreas Köhler -Andrew France -Andrew Hsu -Andrew Macpherson -Andrew McDonnell -Andrew Po -Andrey Petrov -André Martins -Andy Goldstein -Andy Rothfusz -Anil Madhavapeddy -Ankush Agarwal -Anne Henmi -Anton Polonskiy -Antonio Murdaca -Antonis Kalipetis -Anusha Ragunathan -Ao Li -Arash Deshmeh -Arnaud Porterie -Ashwini Oruganti -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Bastiaan Bakker -BastianHofmann -Ben Bonnefoy -Ben Creasy -Ben Firshman -Benjamin Boudreau -Benoit Sigoure -Bhumika Bayani -Bill Wang -Bin Liu -Bingshen Wang -Boaz Shuster -Bogdan Anton -Boris Pruessmann -Bradley Cicenas -Brandon Mitchell -Brandon Philips -Brent Salisbury -Bret Fisher -Brian (bex) Exelbierd -Brian Goff -Bryan Bess -Bryan Boreham -Bryan Murphy -bryfry -Cameron Spear -Cao Weiwei -Carlo Mion -Carlos Alexandro Becker -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Faragher -Chao Wang -Charles Chan -Charles Law -Charles Smith -Charlie Drage -ChaYoung You -Chen Chuanliang -Chen Hanxiao -Chen Mingjie -Chen Qiu -Chris Gavin -Chris Gibson -Chris McKinnel -Chris Snow -Chris Weyl -Christian Persson -Christian Stefanescu -Christophe Robin -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Jones -Christy Norman -Chun Chen -Clinton Kitson -Coenraad Loubser -Colin Hebert -Collin Guarino -Colm Hally -Corey Farrell -Corey Quon -Craig Wilhite -Cristian Staretu -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nadé -Dan Cotora -Daniel Cassidy -Daniel Dao -Daniel Farrell -Daniel Gasienica -Daniel Goosen -Daniel Hiltgen -Daniel J Walsh -Daniel Nephin -Daniel Norberg -Daniel Watkins -Daniel Zhang -Danny Berger -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Dave Goodchild -Dave Henderson -Dave Tucker -David Beitey -David Calavera -David Cramer -David Dooling -David Gageot -David Lechner -David Scott -David Sheets -David Williamson -David Xia -David Young -Deng Guangxing -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek McGowan -Deshi Xiao -Dharmit Shah -Dhawal Yogesh Bhanushali -Dieter Reuter -Dima Stopel -Dimitry Andric -Ding Fei -Diogo Monica -Dmitry Gusev -Dmitry Smirnov -Dmitry V. Krivenok -Don Kjer -Dong Chen -Doug Davis -Drew Erny -Ed Costello -Elango Sivanandam -Eli Uriegas -Eli Uriegas -Elias Faxö -Elliot Luo <956941328@qq.com> -Eric Curtin -Eric G. Noriega -Eric Rosenberg -Eric Sage -Eric-Olivier Lamey -Erica Windisch -Erik Hollensbe -Erik St. Martin -Essam A. Hassan -Ethan Haynes -Euan Kemp -Eugene Yakubovich -Evan Allrich -Evan Hazlett -Evan Krall -Evelyn Xu -Everett Toews -Fabio Falci -Fabrizio Soppelsa -Felix Hupfeld -Felix Rabe -Filip Jareš -Flavio Crisciani -Florian Klein -Forest Johnson -Foysal Iqbal -François Scala -Fred Lifton -Frederic Hemberger -Frederick F. 
Kautz IV -Frederik Nordahl Jul Sabroe -Frieder Bluemle -Gabriel Nicolas Avellaneda -Gaetan de Villele -Gang Qiao -Gary Schaetz -Genki Takiuchi -George MacRorie -George Xie -Gianluca Borello -Gildas Cuisinier -Goksu Toprak -Gou Rao -Grant Reaber -Greg Pflaum -Guilhem Lettron -Guillaume J. Charmes -Guillaume Le Floch -gwx296173 -Günther Jungbluth -Hakan Özler -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harold Cooper -Harry Zhang -He Simei -Helen Xie -Henning Sprang -Henry N -Hernan Garcia -Hongbin Lu -Hu Keping -Huayi Zhang -huqun -Huu Nguyen -Hyzhou Zhy -Ian Campbell -Ian Philpot -Ignacio Capurro -Ilya Dmitrichenko -Ilya Khlopotov -Ilya Sotkov -Ioan Eugen Stan -Isabel Jimenez -Ivan Grcic -Ivan Markin -Jacob Atzen -Jacob Tomlinson -Jaivish Kothari -Jake Lambert -Jake Sanders -James Nesbitt -James Turnbull -Jamie Hannaford -Jan Koprowski -Jan Pazdziora -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Hocutt -Jasmine Hegman -Jason Heiss -Jason Plum -Jay Kamat -Jean Rouge -Jean-Christophe Sirot -Jean-Pierre Huynh -Jeff Lindsay -Jeff Nickoloff -Jeff Silberman -Jeremy Chambers -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jesse Adametz -Jessica Frazelle -Jezeniel Zapanta -Jian Zhang -Jie Luo -Jilles Oldenbeuving -Jim Galasyn -Jimmy Leger -Jimmy Song -jimmyxian -Jintao Zhang -Joao Fernandes -Joe Doliner -Joe Gordon -Joel Handwell -Joey Geiger -Joffrey F -Johan Euphrosine -Johannes 'fish' Ziemke -John Feminella -John Harris -John Howard (VM) -John Laswell -John Maguire -John Mulhausen -John Starks -John Stephens -John Tims -John V. Martinez -John Willis -Jonathan Boulle -Jonathan Lee -Jonathan Lomas -Jonathan McCrohan -Jonh Wendell -Jordan Jennings -Joseph Kern -Josh Bodah -Josh Chorlton -Josh Hawn -Josh Horwitz -Josh Soref -Julien Barbier -Julien Kassar -Julien Maitrehenry -Justas Brazauskas -Justin Cormack -Justin Simonelis -Justyn Temme -Jyrki Puttonen -Jérémie Drouet -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu (Kennan) -Kara Alexandra -Kareem Khazem -Karthik Nayak -Kat Samperi -Kathryn Spiers -Katie McLaughlin -Ke Xu -Kei Ohmura -Keith Hudgins -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickaël Laventure -Kevin Burke -Kevin Feyrer -Kevin Kern -Kevin Kirsche -Kevin Meredith -Kevin Richardson -khaled souf -Kim Eik -Kir Kolyshkin -Kotaro Yoshimatsu -Krasi Georgiev -Kris-Mikael Krister -Kun Zhang -Kunal Kushwaha -Lachlan Cooper -Lai Jiangshan -Lars Kellogg-Stedman -Laura Frank -Laurent Erignoux -Lee Gaines -Lei Jitang -Lennie -Leo Gallucci -Lewis Daly -Li Yi -Li Yi -Liang-Chi Hsieh -Lifubang -Lihua Tang -Lily Guo -Lin Lu -Linus Heckemann -Liping Xue -Liron Levin -liwenqi -lixiaobing10051267 -Lloyd Dewolf -Lorenzo Fontana -Louis Opter -Luca Favatella -Luca Marturana -Lucas Chan -Luka Hartwig -Lukasz Zajaczkowski -Lydell Manganti -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Malte Janduda -Manjunath A Kumatagi -Mansi Nahar -mapk0y -Marc Bihlmaier -Marco Mariani -Marco Vedovati -Marcus Martins -Marianna Tessel -Marius Sturm -Mark Oates -Marsh Macy -Martin Mosegaard Amdisen -Mary Anthony -Mason Fish -Mason Malone -Mateusz Major -Mathieu Champlon -Matt Gucci -Matt Robenolt -Matteo Orefice -Matthew Heon -Matthieu Hauglustaine -Mauro Porras P -Max Shytikov -Maxime Petazzoni -Mei ChunTao -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Crosby -Michael Friis -Michael Irwin -Michael Käufl -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Steinert -Michael West -Michal Minář -Michał Czeraszkiewicz -Miguel Angel Alvarez Cabrerizo -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Danese -Mike Dillon -Mike Goelzer -Mike MacCana -mikelinjie <294893458@qq.com> -Mikhail Vasin -Milind Chawre -Mindaugas Rukas -Misty Stanley-Jones -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohini Anne Dsouza -Moorthy RS -Morgan Bauer -Moysés Borges -Mrunal Patel -muicoder -Muthukumar R -Máximo Cuadros -Mårten Cassel -Nace Oroz -Nahum Shalman -Nalin Dahyabhai -Nao YONASHIRO -Nassim 'Nass' Eddequiouaq -Natalie Parker -Nate Brennand -Nathan Hsieh -Nathan LeClaire -Nathan McCauley -Neil Peterson -Nick Adcock -Nico Stapelbroek -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nikhil Chawla -Nikolas Garofil -Nikolay Milovanov -Nir Soffer -Nishant Totla -NIWA Hideyuki -Noah Treuhaft -O.S. Tezer -ohmystack -Olle Jonsson -Olli Janatuinen -Otto Kekäläinen -Ovidio Mallo -Pascal Borreli -Patrick Böänziger -Patrick Hemmer -Patrick Lang -Paul -Paul Kehrer -Paul Lietar -Paul Weaver -Pavel Pospisil -Paweł Szczekutowicz -Peeyush Gupta -Per Lundberg -Peter Edge -Peter Hsu -Peter Jaffe -Peter Kehl -Peter Nagy -Peter Salvatore -Peter Waller -Phil Estes -Philip Alexander Etling -Philipp Gillé -Philipp Schmied -pidster -pixelistik -Pratik Karki -Prayag Verma -Preston Cowley -Pure White -Qiang Huang -Qinglan Peng -qudongfang -Raghavendra K T -Ravi Shekhar Jethani -Ray Tsang -Reficul -Remy Suen -Renaud Gaubert -Ricardo N Feliciano -Rich Moyse -Richard Mathie -Richard Scothern -Rick Wieman -Ritesh H Shukla -Riyaz Faizullabhoy -Robert Wallis -Robin Naundorf -Robin Speekenbrink -Rodolfo Ortiz -Rogelio Canedo -Rohan Verma -Roland Kammerer -Roman Dudin -Rory Hunter -Ross Boucher -Rubens Figueiredo -Rui Cao -Ryan Belgrave -Ryan Detzel -Ryan Stelly -Ryan Wilson-Perkin -Ryan Zhang -Sainath Grandhi -Sakeven Jiang -Sally O'Malley -Sam Neirinck -Sambuddha Basu -Sami Tabet -Samuel Karp -Santhosh Manohar -Scott Brenner -Scott Collier -Sean Christopherson -Sean Rodman -Sebastiaan van Stijn -Sergey Tryuber -Serhat Gülçiçek -Sevki Hasirci -Shaun Kaasten -Sheng Yang -Shijiang Wei -Shishir Mahajan -Shoubhik Bose -Shukui Yang -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silvin Lubecki -Simei He -Simon Ferquel -Sindhu S -Slava Semushin -Solomon Hykes -Song Gao -Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> -Srini Brahmaroutu -Stefan S. -Stefan Scherer -Stefan Weil -Stephane Jeandeaux -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Richards -Steven Burgess -Subhajit Ghosh -Sun Jianbo -Sune Keller -Sungwon Han -Sunny Gogoi -Sven Dowideit -Sylvain Baubeau -Sébastien HOUZÉ -T K Sourabh -TAGOMORI Satoshi -taiji-tech -Taylor Jones -Tejaswini Duggaraju -Thatcher Peskens -Thomas Gazagnaire -Thomas Krzero -Thomas Leonard -Thomas Léveil -Thomas Riccardi -Thomas Swift -Tianon Gravi -Tianyi Wang -Tibor Vass -Tim Dettrick -Tim Hockin -Tim Smith -Tim Waugh -Tim Wraight -timfeirg -Timothy Hobbs -Tobias Bradtke -Tobias Gesellchen -Todd Whiteman -Tom Denham -Tom Fotherby -Tom Klingenberg -Tom Milligan -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomáš Hrčka -Tony Abboud -Tõnis Tiigi -Trapier Marshall -Travis Cline -Tristan Carel -Tycho Andersen -Tycho Andersen -uhayate -Ulysses Souza -Umesh Yadav -Valentin Lorentz -Veres Lajos -Victor Vieux -Victoria Bialas -Viktor Stanchev -Vimal Raghubir -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Woo -Vishnu Kannan -Vivek Goyal -Wang Jie -Wang Lei -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wataru Ishida -Wayne Song -Wen Cheng Ma -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -William Henry -Xianglin Gao -Xiaodong Zhang -Xiaoxi He -Xinbo Weng -Xuecong Liao -Yan Feng -Yanqiang Miao -Yassine Tijani -Yi EungJun -Ying Li -Yong Tang -Yosef Fertel -Yu Peng -Yuan Sun -Yue Zhang -Yunxiang Huang -Zachary Romero -Zander Mackie -zebrilee -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhenghenghuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Álex González -Álvaro Lázaro -Átila Camurça Alves -徐俊杰 diff --git a/test/vendor/github.com/docker/cli/LICENSE b/test/vendor/github.com/docker/cli/LICENSE deleted file mode 100644 index 9c8e20ab85..0000000000 --- a/test/vendor/github.com/docker/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/test/vendor/github.com/docker/cli/NOTICE b/test/vendor/github.com/docker/cli/NOTICE deleted file mode 100644 index 58b19b6d15..0000000000 --- a/test/vendor/github.com/docker/cli/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/test/vendor/github.com/docker/cli/cli/config/config.go b/test/vendor/github.com/docker/cli/cli/config/config.go deleted file mode 100644 index c860cf7125..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/config.go +++ /dev/null @@ -1,140 +0,0 @@ -package config - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" - contextsDir = "contexts" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} - -// Dir returns the directory the configuration file is stored in -func Dir() string { - return configDir -} - -// ContextStoreDir returns the directory the docker contexts are stored in -func ContextStoreDir() string { - return filepath.Join(Dir(), contextsDir) -} - -// SetDir sets the directory the configuration file is stored in -func SetDir(dir string) { - configDir = filepath.Clean(dir) -} - -// Path returns the path to a file relative to the config dir -func Path(p ...string) (string, error) { - path := filepath.Join(append([]string{Dir()}, p...)...) - if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { - return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) - } - return path, nil -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = Dir() - } - - filename := filepath.Join(configDir, ConfigFileName) - configFile := configfile.New(filename) - - // Try happy path first - latest config file - if _, err := os.Stat(filename); err == nil { - file, err := os.Open(filename) - if err != nil { - return configFile, errors.Wrap(err, filename) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Wrap(err, filename) - } - return configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return configFile, errors.Wrap(err, filename) - } - - // Can't find latest config file so check for the old one - homedir, err := os.UserHomeDir() - if err != nil { - return configFile, errors.Wrap(err, oldConfigfile) - } - confFile := filepath.Join(homedir, oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return configFile, errors.Wrap(err, filename) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return configFile, errors.Wrap(err, filename) - } - return configFile, nil -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, err := Load(Dir()) - if err != nil { - fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) - } - if !configFile.ContainsAuth() { - configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) - } - return configFile -} diff --git a/test/vendor/github.com/docker/cli/cli/config/configfile/file.go b/test/vendor/github.com/docker/cli/cli/config/configfile/file.go deleted file mode 100644 index 388a5d54d6..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ /dev/null @@ -1,387 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/pkg/errors" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexServer = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Plugins map[string]map[string]string `json:"plugins,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` -} - -// ProxyConfig contains proxy configuration settings -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` -} - -// New initializes an empty configuration file for the given filename 'fn' -func New(fn string) *ConfigFile { - return &ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - Plugins: make(map[string]map[string]string), - Aliases: make(map[string]string), - } -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - 
authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - if ac.Auth != "" { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return checkKubernetesConfiguration(configFile.Kubernetes) -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. -func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// GetAuthConfigs returns the mapping of repo to auth configuration -func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { - return configFile.AuthConfigs -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return errors.Errorf("Can't save config with empty filename") - } - - dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) - if err != nil { - return err - } - err = configFile.SaveToWriter(temp) - temp.Close() - if err != nil { - os.Remove(temp.Name()) - return err - } - return os.Rename(temp.Name(), configFile.Filename) -} - -// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and -// then checking this against any environment variables provided to the container -func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { - var cfgKey string - - if _, ok := configFile.Proxies[host]; !ok { - cfgKey = "default" - } else { - cfgKey = host - } - - config := configFile.Proxies[cfgKey] - permitted := map[string]*string{ - "HTTP_PROXY": &config.HTTPProxy, - "HTTPS_PROXY": &config.HTTPSProxy, - "NO_PROXY": &config.NoProxy, - "FTP_PROXY": &config.FTPProxy, - } - m := runOpts - if m == nil { - m = make(map[string]*string) - } - for k := range permitted { - if *permitted[k] == "" { - continue - } - if _, ok := m[k]; !ok { - m[k] = permitted[k] - } - if _, ok := 
m[strings.ToLower(k)]; !ok { - m[strings.ToLower(k)] = permitted[k] - } - } - return m -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", errors.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", errors.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// GetCredentialsStore returns a new credentials store from the settings in the -// configuration file -func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { - if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - return newNativeStore(configFile, helper) - } - return credentials.NewFileStore(configFile) -} - -// var for unit testing. -var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { - return credentials.NewNativeStore(configFile, helperSuffix) -} - -// GetAuthConfig for a repository from the credential store -func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { - return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { - if c.CredentialHelpers != nil && registryHostname != "" { - if helper, exists := c.CredentialHelpers[registryHostname]; exists { - return helper - } - } - return c.CredentialsStore -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. -func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - addAll := func(from map[string]types.AuthConfig) { - for reg, ac := range from { - auths[reg] = ac - } - } - - defaultStore := configFile.GetCredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(newAuths) - - // Auth configs from a registry-specific helper should override those from the default store. - for registryHostname := range configFile.CredentialHelpers { - newAuth, err := configFile.GetAuthConfig(registryHostname) - if err != nil { - return nil, err - } - auths[registryHostname] = newAuth - } - return auths, nil -} - -// GetFilename returns the file name that this config file is based on. 
-func (configFile *ConfigFile) GetFilename() string { - return configFile.Filename -} - -// PluginConfig retrieves the requested option for the given plugin. -func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { - if configFile.Plugins == nil { - return "", false - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - return "", false - } - value, ok := pluginConfig[option] - return value, ok -} - -// SetPluginConfig sets the option to the given value for the given -// plugin. Passing a value of "" will remove the option. If removing -// the final config item for a given plugin then also cleans up the -// overall plugin entry. -func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { - if configFile.Plugins == nil { - configFile.Plugins = make(map[string]map[string]string) - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - pluginConfig = make(map[string]string) - configFile.Plugins[pluginname] = pluginConfig - } - if value != "" { - pluginConfig[option] = value - } else { - delete(pluginConfig, option) - } - if len(pluginConfig) == 0 { - delete(configFile.Plugins, pluginname) - } -} - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/test/vendor/github.com/docker/cli/cli/config/credentials/credentials.go deleted file mode 100644 index 28d58ec48d..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/test/vendor/github.com/docker/cli/cli/config/credentials/default_store.go deleted file mode 100644 index 7a760f1a97..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ /dev/null @@ -1,21 +0,0 @@ -package credentials - -import ( - "os/exec" -) - -// DetectDefaultStore return the default credentials store for the platform if -// the store executable is available. 
-func DetectDefaultStore(store string) string { - platformDefault := defaultCredentialsStore() - - // user defined or no default for platform - if store != "" || platformDefault == "" { - return store - } - - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { - return platformDefault - } - return "" -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go deleted file mode 100644 index 5d42dec622..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "osxkeychain" -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go deleted file mode 100644 index a9012c6d4a..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package credentials - -import ( - "os/exec" -) - -func defaultCredentialsStore() string { - if _, err := exec.LookPath("pass"); err == nil { - return "pass" - } - - return "secretservice" -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go deleted file mode 100644 index 3028168ac2..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -func defaultCredentialsStore() string { - return "" -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go deleted file mode 100644 index bb799ca61b..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "wincred" -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/test/vendor/github.com/docker/cli/cli/config/credentials/file_store.go deleted file mode 100644 index e509820b73..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ /dev/null @@ -1,81 +0,0 @@ -package credentials - -import ( - "strings" - - "github.com/docker/cli/cli/config/types" -) - -type store interface { - Save() error - GetAuthConfigs() map[string]types.AuthConfig - GetFilename() string -} - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file store -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file store) Store { - return &fileStore{file: file} -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.GetAuthConfigs(), serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. 
-func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.GetAuthConfigs()[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for r, ac := range c.file.GetAuthConfigs() { - if serverAddress == ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.GetAuthConfigs(), nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig - return c.file.Save() -} - -func (c *fileStore) GetFilename() string { - return c.file.GetFilename() -} - -func (c *fileStore) IsFileStore() bool { - return true -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} diff --git a/test/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/test/vendor/github.com/docker/cli/cli/config/credentials/native_store.go deleted file mode 100644 index afe542cc3c..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ /dev/null @@ -1,143 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file store, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. 
-func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. - fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. -func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/test/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/test/vendor/github.com/docker/cli/cli/config/types/authconfig.go deleted file mode 100644 index 056af6b842..0000000000 --- a/test/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/test/vendor/github.com/docker/distribution/LICENSE b/test/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/test/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe1653..0000000000 --- a/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index 6e3f1ccc41..0000000000 --- a/test/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. 
The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleManager() Manager { - return &simpleManager{ - Challenges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challenges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challenges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challenges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. 
- return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/LICENSE b/test/vendor/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2af..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/github.com/docker/docker-credential-helpers/client/client.go b/test/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index d1d0434cb5..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,121 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// isValidCredsMessage checks if 'msg' contains invalid credentials error message. -// It returns whether the logs are free of invalid credentials errors and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. -func isValidCredsMessage(msg string) error { - if credentials.IsCredentialsMissingServerURLMessage(msg) { - return credentials.NewErrCredentialsMissingServerURL() - } - - if credentials.IsCredentialsMissingUsernameMessage(msg) { - return credentials.NewErrCredentialsMissingUsername() - } - - return nil -} - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(creds); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. 
-func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/client/command.go b/test/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 8da3343065..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - "os/exec" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) -} - -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} - } -} - -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) - programCmd.Env = os.Environ() - if env != nil { - for k, v := range *env { - programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) - } - } - programCmd.Stderr = os.Stderr - return programCmd -} - -// Shell invokes shell commands to talk with a remote credentials helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/test/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index da8b594e7f..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,186 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// isValid checks the integrity of Credentials object such that no credentials lack -// a server URL or a username. -// It returns whether the credentials are valid and the error if it isn't. 
-// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername -func (c *Credentials) isValid() (bool, error) { - if len(c.ServerURL) == 0 { - return false, NewErrCredentialsMissingServerURL() - } - - if len(c.Username) == 0 { - return false, NewErrCredentialsMissingUsername() - } - - return true, nil -} - -// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials" -var CredsLabel = "Docker Credentials" - -// SetCredsLabel is a simple setter for CredsLabel -func SetCredsLabel(label string) { - CredsLabel = label -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - case "list": - return List(helper, out) - case "version": - return PrintVersion(out) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - if ok, err := creds.isValid(); !ok { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - ServerURL: serverURL, - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. 
-func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - return helper.Delete(serverURL) -} - -//List returns all the serverURLs of keys in -//the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} - -//PrintVersion outputs the current version. -func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) - return nil -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/test/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index fe6a5aef45..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -const ( - // ErrCredentialsNotFound standardizes the not found error, so every helper returns - // the same message and docker can handle it properly. - errCredentialsNotFoundMessage = "credentials not found in native keychain" - - // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize - // invalid credentials or credentials management operations - errCredentialsMissingServerURLMessage = "no credentials server URL" - errCredentialsMissingUsernameMessage = "no credentials username" -) - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} - -// errCredentialsMissingServerURL represents an error raised -// when the credentials object has no server URL or when no -// server URL is provided to a credentials operation requiring -// one. -type errCredentialsMissingServerURL struct{} - -func (errCredentialsMissingServerURL) Error() string { - return errCredentialsMissingServerURLMessage -} - -// errCredentialsMissingUsername represents an error raised -// when the credentials object has no username or when no -// username is provided to a credentials operation requiring -// one. 
-type errCredentialsMissingUsername struct{} - -func (errCredentialsMissingUsername) Error() string { - return errCredentialsMissingUsernameMessage -} - -// NewErrCredentialsMissingServerURL creates a new error for -// errCredentialsMissingServerURL. -func NewErrCredentialsMissingServerURL() error { - return errCredentialsMissingServerURL{} -} - -// NewErrCredentialsMissingUsername creates a new error for -// errCredentialsMissingUsername. -func NewErrCredentialsMissingUsername() error { - return errCredentialsMissingUsername{} -} - -// IsCredentialsMissingServerURL returns true if the error -// was an errCredentialsMissingServerURL. -func IsCredentialsMissingServerURL(err error) bool { - _, ok := err.(errCredentialsMissingServerURL) - return ok -} - -// IsCredentialsMissingServerURLMessage checks for an -// errCredentialsMissingServerURL in the error message. -func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage -} - -// IsCredentialsMissingUsername returns true if the error -// was an errCredentialsMissingUsername. -func IsCredentialsMissingUsername(err error) bool { - _, ok := err.(errCredentialsMissingUsername) - return ok -} - -// IsCredentialsMissingUsernameMessage checks for an -// errCredentialsMissingUsername in the error message. -func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/test/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254d..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/test/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/test/vendor/github.com/docker/docker-credential-helpers/credentials/version.go deleted file mode 100644 index c2cc3e2e02..0000000000 --- a/test/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package credentials - -// Version holds a string describing the current version -const Version = "0.6.3" diff --git a/test/vendor/github.com/docker/docker/AUTHORS b/test/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index d5b6cbbead..0000000000 --- a/test/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2082 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Aaron.L.Xu -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Adam Avilla -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Alejandro González Hevia -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Vagin -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -Arash Deshmeh -ArikaChen -Arnaud Lefebvre -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken -Benoit Chesneau -Bernerd Schaefer -Bernhard M. 
Wiedemann -Bert Goethals -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bily Zhang -Bin Liu -Bingshen Wang -Blake Geno -Boaz Shuster -bobby abbott -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris McKinnel -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. Costa -Clayton Coleman -Clinton Kitson -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Corbin Coleman -Corey Farrell -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Danny Berger -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. 
Karr -David Mackey -David Mat -David Mcanulty -David McKay -David P Hilton -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Ethan Bell -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -gautam, prasanna -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -haikuoliu -Hakan Özler -Hamish Hutchings -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. 
Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jie Luo -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Galasyn -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard (VM) -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -K. Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kun Zhang -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. 
Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Gong -Lei Jitang -Len Weincier -Lennie -Leo Gallucci -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minář -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Noriki Nakamura -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Omri Shiv -Oriol Francès -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Phil -Phil Estes -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Pure White -pysqz -Qiang Huang -Qinglan Peng -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Stern -Robert Terhaar -Robert Wallis -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Strashkin -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Rémy Greinhofer -s. 
rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simei He -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Stanislav Bondarenko -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephan Spindler -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Tabakhase -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Tõnis Tiigi -tpng -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -vanderliang -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vlastimil Zeman -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Ward Vandewege -WarheadsSE -Wassim Dhif -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -Wentao Zhang -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -WiseTrem -Wolfgang Powisch -Wonjun Kim -xamyzhao -Xian Chaobo -Xianglin Gao -Xianlu Bird -Xiao YongBiao -XiaoBing Jiang -Xiaodong Zhang -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yang Bai -Yang Pengfei -yangchenliang -Yanqiang Miao -Yao Zaiyong -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Junqueira -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 diff --git a/test/vendor/github.com/docker/docker/LICENSE b/test/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb67..0000000000 --- a/test/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/test/vendor/github.com/docker/docker/NOTICE b/test/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d15..0000000000 --- a/test/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. 
-It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/test/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index 5e6310fdcd..0000000000 --- a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,93 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} diff --git a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/test/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 67ab9e9b31..0000000000 --- a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetRuntimeDir is unsupported on non-linux system. -func GetRuntimeDir() (string, error) { - return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") -} - -// StickRuntimeDirContents is unsupported on non-linux system. -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") -} - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} diff --git a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/test/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go deleted file mode 100644 index 284e8be7ca..0000000000 --- a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - "os/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// If compiling statically, ensure the osusergo build tag is used. -// If needing to do nss lookups, do not compile statically. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/test/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go deleted file mode 100644 index 2f81813b28..0000000000 --- a/test/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. 
-// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/test/vendor/github.com/google/go-containerregistry/LICENSE b/test/vendor/github.com/google/go-containerregistry/LICENSE deleted file mode 100644 index 7a4a3ea242..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/test/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go b/test/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go deleted file mode 100644 index 14a05eaa17..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package and provides helpers for adding Close to io.{Reader|Writer}. -package and - -import ( - "io" -) - -// ReadCloser implements io.ReadCloser by reading from a particular io.Reader -// and then calling the provided "Close()" method. -type ReadCloser struct { - io.Reader - CloseFunc func() error -} - -var _ io.ReadCloser = (*ReadCloser)(nil) - -// Close implements io.ReadCloser -func (rac *ReadCloser) Close() error { - return rac.CloseFunc() -} - -// WriteCloser implements io.WriteCloser by reading from a particular io.Writer -// and then calling the provided "Close()" method. -type WriteCloser struct { - io.Writer - CloseFunc func() error -} - -var _ io.WriteCloser = (*WriteCloser)(nil) - -// Close implements io.WriteCloser -func (wac *WriteCloser) Close() error { - return wac.CloseFunc() -} diff --git a/test/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go b/test/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go deleted file mode 100644 index e7d673ff6b..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gzip provides helper functions for interacting with gzipped streams. -package gzip - -import ( - "bufio" - "bytes" - "compress/gzip" - "io" - - "github.com/google/go-containerregistry/internal/and" -) - -var gzipMagicHeader = []byte{'\x1f', '\x8b'} - -// ReadCloser reads uncompressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which compressed data may be read. -// This uses gzip.BestSpeed for the compression level. -func ReadCloser(r io.ReadCloser) io.ReadCloser { - return ReadCloserLevel(r, gzip.BestSpeed) -} - -// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which compressed data may be read. -// Refer to compress/gzip for the level: -// https://golang.org/pkg/compress/gzip/#pkg-constants -func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { - pr, pw := io.Pipe() - - // For highly compressible layers, gzip.Writer will output a very small - // number of bytes per Write(). This is normally fine, but when pushing - // to a registry, we want to ensure that we're taking full advantage of - // the available bandwidth instead of sending tons of tiny writes over - // the wire. - // 64K ought to be small enough for anybody. - bw := bufio.NewWriterSize(pw, 2<<16) - - // Returns err so we can pw.CloseWithError(err) - go func() error { - // TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect. - // Context: https://golang.org/issue/24283 - gw, err := gzip.NewWriterLevel(bw, level) - if err != nil { - return pw.CloseWithError(err) - } - - if _, err := io.Copy(gw, r); err != nil { - defer r.Close() - defer gw.Close() - return pw.CloseWithError(err) - } - - // Close gzip writer to Flush it and write gzip trailers. - if err := gw.Close(); err != nil { - return pw.CloseWithError(err) - } - - // Flush bufio writer to ensure we write out everything. - if err := bw.Flush(); err != nil { - return pw.CloseWithError(err) - } - - // We don't really care if these fail. - defer pw.Close() - defer r.Close() - - return nil - }() - - return pr -} - -// UnzipReadCloser reads compressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which uncompessed data may be read. -func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { - gr, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - return &and.ReadCloser{ - Reader: gr, - CloseFunc: func() error { - // If the unzip fails, then this seems to return the same - // error as the read. We don't want this to interfere with - // us closing the main ReadCloser, since this could leave - // an open file descriptor (fails on Windows). - gr.Close() - return r.Close() - }, - }, nil -} - -// Is detects whether the input stream is compressed. 
-func Is(r io.Reader) (bool, error) { - magicHeader := make([]byte, 2) - n, err := r.Read(magicHeader) - if n == 0 && err == io.EOF { - return false, nil - } - if err != nil { - return false, err - } - return bytes.Equal(magicHeader, gzipMagicHeader), nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/test/vendor/github.com/google/go-containerregistry/internal/redact/redact.go deleted file mode 100644 index dc9c56b7f3..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/redact/redact.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package redact contains a simple context signal for redacting requests. -package redact - -import ( - "context" -) - -type contextKey string - -var redactKey = contextKey("redact") - -// NewContext creates a new ctx with the reason for redaction. -func NewContext(ctx context.Context, reason string) context.Context { - return context.WithValue(ctx, redactKey, reason) -} - -// FromContext returns the redaction reason, if any. -func FromContext(ctx context.Context) (bool, string) { - reason, ok := ctx.Value(redactKey).(string) - return ok, reason -} diff --git a/test/vendor/github.com/google/go-containerregistry/internal/retry/retry.go b/test/vendor/github.com/google/go-containerregistry/internal/retry/retry.go deleted file mode 100644 index 133cb1c122..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/retry/retry.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package retry provides methods for retrying operations. It is a thin wrapper -// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier. -package retry - -import ( - "context" - "fmt" - - "github.com/google/go-containerregistry/internal/retry/wait" -) - -// Backoff is an alias of our own wait.Backoff to avoid name conflicts with -// the kubernetes wait package. Typing retry.Backoff is aesier than fixing -// the wrong import every time you use wait.Backoff. -type Backoff = wait.Backoff - -// This is implemented by several errors in the net package as well as our -// transport.Error. -type temporary interface { - Temporary() bool -} - -// IsTemporary returns true if err implements Temporary() and it returns true. 
-func IsTemporary(err error) bool { - if err == context.DeadlineExceeded { - return false - } - if te, ok := err.(temporary); ok && te.Temporary() { - return true - } - return false -} - -// IsNotNil returns true if err is not nil. -func IsNotNil(err error) bool { - return err != nil -} - -// Predicate determines whether an error should be retried. -type Predicate func(error) (retry bool) - -// Retry retries a given function, f, until a predicate is satisfied, using -// exponential backoff. If the predicate is never satisfied, it will return the -// last error returned by f. -func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) { - if f == nil { - return fmt.Errorf("nil f passed to retry") - } - if p == nil { - return fmt.Errorf("nil p passed to retry") - } - - condition := func() (bool, error) { - err = f() - if p(err) { - return false, nil - } - return true, err - } - - wait.ExponentialBackoff(backoff, condition) - return -} diff --git a/test/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go b/test/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go deleted file mode 100644 index ab06e5f109..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait is a subset of k8s.io/apimachinery to avoid conflicts -// in dependencies (specifically, logging). -package wait - -import ( - "errors" - "math/rand" - "time" -) - -// Jitter returns a time.Duration between duration and duration + maxFactor * -// duration. -// -// This allows clients to avoid converging on periodic behavior. If maxFactor -// is 0.0, a suggested default value will be chosen. -func Jitter(duration time.Duration, maxFactor float64) time.Duration { - if maxFactor <= 0.0 { - maxFactor = 1.0 - } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) - return wait -} - -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - -// ConditionFunc returns true if the condition is satisfied, or an error -// if the loop should be aborted. -type ConditionFunc func() (done bool, err error) - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. 
- Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := condition(); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} diff --git a/test/vendor/github.com/google/go-containerregistry/internal/verify/verify.go b/test/vendor/github.com/google/go-containerregistry/internal/verify/verify.go deleted file mode 100644 index 4446803800..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/internal/verify/verify.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package verify provides a ReadCloser that verifies content matches the -// expected hash values. 
-package verify - -import ( - "encoding/hex" - "fmt" - "hash" - "io" - - "github.com/google/go-containerregistry/internal/and" - v1 "github.com/google/go-containerregistry/pkg/v1" -) - -type verifyReader struct { - inner io.Reader - hasher hash.Hash - expected v1.Hash -} - -// Read implements io.Reader -func (vc *verifyReader) Read(b []byte) (int, error) { - n, err := vc.inner.Read(b) - if err == io.EOF { - got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size()))) - if want := vc.expected.Hex; got != want { - return n, fmt.Errorf("error verifying %s checksum; got %q, want %q", - vc.expected.Algorithm, got, want) - } - } - return n, err -} - -// ReadCloser wraps the given io.ReadCloser to verify that its contents match -// the provided v1.Hash before io.EOF is returned. -func ReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) { - w, err := v1.Hasher(h.Algorithm) - if err != nil { - return nil, err - } - r2 := io.TeeReader(r, w) - return &and.ReadCloser{ - Reader: &verifyReader{ - inner: r2, - hasher: w, - expected: h, - }, - CloseFunc: r.Close, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/authn/README.md deleted file mode 100644 index 1eb17c7ab1..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/README.md +++ /dev/null @@ -1,242 +0,0 @@ -# `authn` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn) - -This README outlines how we acquire and use credentials when interacting with a registry. - -As much as possible, we attempt to emulate docker's authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with docker; however, when things don't work, a basic understanding of what's going on can help with debugging. - -The official documentation for how docker authentication works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here. - -## tl;dr for consumers of this package - -By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images. - -To use the credentials found in your docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.: - -```go -package main - -import ( - "fmt" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -func main() { - ref, err := name.ParseReference("registry.example.com/private/repo") - if err != nil { - panic(err) - } - - // Fetch the manifest using default credentials. 
- img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) - if err != nil { - panic(err) - } - - // Prints the digest of registry.example.com/private/repo - fmt.Println(img.Digest) -} -``` - -(If you're only using [gcr.io](https://gcr.io), see the [`pkg/v1/google.Keychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google#Keychain), which emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr).) - -## The Config File - -This file contains various configuration options for docker and is (by default) located at: -* `$HOME/.docker/config.json` (on linux and darwin), or -* `%USERPROFILE%\.docker\config.json` (on windows). - -You can override this location with the `DOCKER_CONFIG` environment variable. - -### Plaintext - -The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this: - -```json -{ - "auths": { - "registry.example.com": { - "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI=" - } - } -} -``` - -The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617). - -**NOTE**: This means that your credentials are stored _in plaintext_: - -```bash -$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d -AzureDiamond:hunter2 -``` - -For what it's worth, this config file is equivalent to: - -```json -{ - "auths": { - "registry.example.com": { - "username": "AzureDiamond", - "password": "hunter2" - } - } -} -``` - -... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`. - -### Helpers - -If you log in like this, docker will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should! - -To configure a global credential helper: -```json -{ - "credsStore": "osxkeychain" -} -``` - -To configure a per-registry credential helper: -```json -{ - "credHelpers": { - "gcr.io": "gcr" - } -} -``` - -We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry. - -## Credential Helpers - -The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file. - -The protocol has several verbs, but the one we most care about is `get`. - -For example, using the following config file: -```json -{ - "credHelpers": { - "gcr.io": "gcr", - "eu.gcr.io": "gcr" - } -} -``` - -To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find -the credential helper for `gcr.io` is `gcr`. By appending that value to -`docker-credential-`, we can get the name of the binary we need to use. - -For this example, that's `docker-credential-gcr`, which must be on our `$PATH`. 
-We'll then invoke that binary to get credentials: - -```bash -$ echo "gcr.io" | docker-credential-gcr get -{"Username":"_token","Secret":""} -``` - -You can configure the same credential helper for multiple registries, which is -why we need to pass the domain in via STDIN, e.g. if we were trying to access -`eu.gcr.io`, we'd do this instead: - -```bash -$ echo "eu.gcr.io" | docker-credential-gcr get -{"Username":"_token","Secret":""} -``` - -### Debugging credential helpers - -If a credential helper is configured but doesn't seem to be working, it can be -challenging to debug. Implementing a fake credential helper lets you poke around -to make it easier to see where the failure is happening. - -This "implements" a credential helper with hard-coded values: -``` -#!/usr/bin/env bash -echo '{"Username":"","Secret":"hunter2"}' -``` - - -This implements a credential helper that prints the output of -`docker-credential-gcr` to both stderr and whatever called it, which allows you -to snoop on another credential helper: -``` -#!/usr/bin/env bash -docker-credential-gcr $@ | tee >(cat 1>&2) -``` - -Put those files somewhere on your path, naming them e.g. -`docker-credential-hardcoded` and `docker-credential-tee`, then modify the -config file to use them: - -```json -{ - "credHelpers": { - "gcr.io": "tee", - "eu.gcr.io": "hardcoded" - } -} -``` - -The `docker-credential-tee` trick works with both `crane` and `docker`: - -```bash -$ crane manifest gcr.io/google-containers/pause > /dev/null -{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""} - -$ docker pull gcr.io/google-containers/pause -Using default tag: latest -{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""} -latest: Pulling from google-containers/pause -a3ed95caeb02: Pull complete -4964c72cd024: Pull complete -Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f -Status: Downloaded newer image for gcr.io/google-containers/pause:latest -gcr.io/google-containers/pause:latest -``` - -## The Registry - -There are two methods for authenticating against a registry: -[token](https://docs.docker.com/registry/spec/auth/token/) and -[oauth2](https://docs.docker.com/registry/spec/auth/oauth/). - -Both methods are used to acquire an opaque `Bearer` token (or -[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21)) -to use in the `Authorization` header. The registry will return a `401 -Unauthorized` during the [version -check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check) -(or during normal operations) with -[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge -indicating how to proceed. 
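As a rough, illustrative sketch of how these pieces fit together (reusing the placeholder `registry.example.com/private/repo` from the example above), you can resolve an `Authenticator` from the `DefaultKeychain` yourself and inspect the `AuthConfig` it produces before the transport exchanges it for a `Bearer` token:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// The registry portion of the reference decides which credentials apply.
	ref, err := name.ParseReference("registry.example.com/private/repo")
	if err != nil {
		log.Fatal(err)
	}

	// Resolve an Authenticator from the docker config file (and any
	// configured credential helpers) for this reference's registry.
	auth, err := authn.DefaultKeychain.Resolve(ref.Context())
	if err != nil {
		log.Fatal(err)
	}

	// The AuthConfig's populated fields (Username/Password, Auth,
	// IdentityToken, RegistryToken) determine how we authenticate.
	cfg, err := auth.Authorization()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("basic credentials: %t, identity token: %t\n",
		cfg.Username != "" || cfg.Auth != "", cfg.IdentityToken != "")
}
```

Which of the two methods below gets used depends on which fields of that `AuthConfig` are populated.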
- -### Token - -If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6) -or -[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7), -we'll use the token method for authentication: - -![basic](../../images/credhelper-basic.svg) - -### OAuth 2 - -If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18) -we'll use the oauth2 method for authentication: - -![oauth](../../images/credhelper-oauth.svg) - -This happens when a credential helper returns a response with the -[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16) -set to `` (no, that's not a placeholder, the literal string `""`). -It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926). - -We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)), -since it's impossible to determine from the registry response whether we should -use oauth, and the token method for authentication is widely implemented by -registries. diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go deleted file mode 100644 index 83214957d5..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// anonymous implements Authenticator for anonymous authentication. -type anonymous struct{} - -// Authorization implements Authenticator. -func (a *anonymous) Authorization() (*AuthConfig, error) { - return &AuthConfig{}, nil -} - -// Anonymous is a singleton Authenticator for providing anonymous auth. -var Anonymous Authenticator = &anonymous{} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go deleted file mode 100644 index 0111f1ae72..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// auth is an Authenticator that simply returns the wrapped AuthConfig. -type auth struct { - config AuthConfig -} - -// FromConfig returns an Authenticator that just returns the given AuthConfig. -func FromConfig(cfg AuthConfig) Authenticator { - return &auth{cfg} -} - -// Authorization implements Authenticator. -func (a *auth) Authorization() (*AuthConfig, error) { - return &a.config, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go deleted file mode 100644 index 690e81d058..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// AuthConfig contains authorization information for connecting to a Registry -// Inlined what we use from github.com/docker/cli/cli/config/types -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// Authenticator is used to authenticate Docker transports. -type Authenticator interface { - // Authorization returns the value to use in an http transport's Authorization header. - Authorization() (*AuthConfig, error) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go deleted file mode 100644 index 500cb6616f..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// Basic implements Authenticator for basic authentication. -type Basic struct { - Username string - Password string -} - -// Authorization implements Authenticator. 
-func (b *Basic) Authorization() (*AuthConfig, error) { - return &AuthConfig{ - Username: b.Username, - Password: b.Password, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go deleted file mode 100644 index 4cf86df92f..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -// Bearer implements Authenticator for bearer authentication. -type Bearer struct { - Token string `json:"token"` -} - -// Authorization implements Authenticator. -func (b *Bearer) Authorization() (*AuthConfig, error) { - return &AuthConfig{ - RegistryToken: b.Token, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go deleted file mode 100644 index c2a5fc0267..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package authn defines different methods of authentication for -// talking to a container registry. -package authn diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go deleted file mode 100644 index 60eebc7599..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package authn - -import ( - "os" - - "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/types" - "github.com/google/go-containerregistry/pkg/name" -) - -// Resource represents a registry or repository that can be authenticated against. -type Resource interface { - // String returns the full string representation of the target, e.g. - // gcr.io/my-project or just gcr.io. - String() string - - // RegistryStr returns just the registry portion of the target, e.g. for - // gcr.io/my-project, this should just return gcr.io. This is needed to - // pull out an appropriate hostname. - RegistryStr() string -} - -// Keychain is an interface for resolving an image reference to a credential. -type Keychain interface { - // Resolve looks up the most appropriate credential for the specified target. - Resolve(Resource) (Authenticator, error) -} - -// defaultKeychain implements Keychain with the semantics of the standard Docker -// credential keychain. -type defaultKeychain struct{} - -var ( - // DefaultKeychain implements Keychain by interpreting the docker config file. - DefaultKeychain Keychain = &defaultKeychain{} -) - -const ( - // DefaultAuthKey is the key used for dockerhub in config files, which - // is hardcoded for historical reasons. - DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/" -) - -// Resolve implements Keychain. -func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { - cf, err := config.Load(os.Getenv("DOCKER_CONFIG")) - if err != nil { - return nil, err - } - - // See: - // https://github.com/google/ko/issues/90 - // https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404 - key := target.RegistryStr() - if key == name.DefaultRegistry { - key = DefaultAuthKey - } - - cfg, err := cf.GetAuthConfig(key) - if err != nil { - return nil, err - } - - empty := types.AuthConfig{} - if cfg == empty { - return Anonymous, nil - } - return FromConfig(AuthConfig{ - Username: cfg.Username, - Password: cfg.Password, - Auth: cfg.Auth, - IdentityToken: cfg.IdentityToken, - RegistryToken: cfg.RegistryToken, - }), nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/test/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go deleted file mode 100644 index 3b1804f5d0..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package authn - -type multiKeychain struct { - keychains []Keychain -} - -// Assert that our multi-keychain implements Keychain. -var _ (Keychain) = (*multiKeychain)(nil) - -// NewMultiKeychain composes a list of keychains into one new keychain. -func NewMultiKeychain(kcs ...Keychain) Keychain { - return &multiKeychain{keychains: kcs} -} - -// Resolve implements Keychain. 
-func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) { - for _, kc := range mk.keychains { - auth, err := kc.Resolve(target) - if err != nil { - return nil, err - } - if auth != Anonymous { - return auth, nil - } - } - return Anonymous, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/test/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go deleted file mode 100644 index 5d25d63d61..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logs exposes the loggers used by this library. -package logs - -import ( - "io/ioutil" - "log" -) - -var ( - // Warn is used to log non-fatal errors. - Warn = log.New(ioutil.Discard, "", log.LstdFlags) - - // Progress is used to log notable, successful events. - Progress = log.New(ioutil.Discard, "", log.LstdFlags) - - // Debug is used to log information that is useful for debugging. - Debug = log.New(ioutil.Discard, "", log.LstdFlags) -) - -// Enabled checks to see if the logger's writer is set to something other -// than ioutil.Discard. This allows callers to avoid expensive operations -// that will end up in /dev/null anyway. -func Enabled(l *log.Logger) bool { - return l.Writer() != ioutil.Discard -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/name/README.md deleted file mode 100644 index 4889b8446a..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `name` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name) diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/check.go deleted file mode 100644 index 01b03e5626..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/check.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" - "unicode/utf8" -) - -// stripRunesFn returns a function which returns -1 (i.e. a value which -// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise. 
-func stripRunesFn(runes string) func(rune) rune { - return func(r rune) rune { - if strings.ContainsRune(runes, r) { - return -1 - } - return r - } -} - -// checkElement checks a given named element matches character and length restrictions. -// Returns true if the given element adheres to the given restrictions, false otherwise. -func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { - numRunes := utf8.RuneCountInString(element) - if (numRunes < minRunes) || (maxRunes < numRunes) { - return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element) - } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { - return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element) - } - return nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/digest.go deleted file mode 100644 index 120dd216ab..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" -) - -const ( - // These have the form: sha256: - // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation. - digestChars = "sh:0123456789abcdef" - digestDelim = "@" -) - -// Digest stores a digest name in a structured form. -type Digest struct { - Repository - digest string - original string -} - -// Ensure Digest implements Reference -var _ Reference = (*Digest)(nil) - -// Context implements Reference. -func (d Digest) Context() Repository { - return d.Repository -} - -// Identifier implements Reference. -func (d Digest) Identifier() string { - return d.DigestStr() -} - -// DigestStr returns the digest component of the Digest. -func (d Digest) DigestStr() string { - return d.digest -} - -// Name returns the name from which the Digest was derived. -func (d Digest) Name() string { - return d.Repository.Name() + digestDelim + d.DigestStr() -} - -// String returns the original input string. -func (d Digest) String() string { - return d.original -} - -func checkDigest(name string) error { - return checkElement("digest", name, digestChars, 7+64, 7+64) -} - -// NewDigest returns a new Digest representing the given name. -func NewDigest(name string, opts ...Option) (Digest, error) { - // Split on "@" - parts := strings.Split(name, digestDelim) - if len(parts) != 2 { - return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) - } - base := parts[0] - digest := parts[1] - - // Always check that the digest is valid. - if err := checkDigest(digest); err != nil { - return Digest{}, err - } - - tag, err := NewTag(base, opts...) - if err == nil { - base = tag.Repository.Name() - } - - repo, err := NewRepository(base, opts...) 
- if err != nil { - return Digest{}, err - } - return Digest{ - Repository: repo, - digest: digest, - original: name, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/doc.go deleted file mode 100644 index b294794dc1..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package name defines structured types for representing image references. -// -// What's in a name? For image references, not nearly enough! -// -// Image references look a lot like URLs, but they differ in that they don't -// contain the scheme (http or https), they can end with a :tag or a @digest -// (the latter being validated), and they perform defaulting for missing -// components. -// -// Since image references don't contain the scheme, we do our best to infer -// if we use http or https from the given hostname. We allow http fallback for -// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in -// ".local", or is in the "private" address space per RFC 1918. For everything -// else, we assume https only. To override this heuristic, use the Insecure -// option. -// -// Image references with a digest signal to us that we should verify the content -// of the image matches the digest. E.g. when pulling a Digest reference, we'll -// calculate the sha256 of the manifest returned by the registry and error out -// if it doesn't match what we asked for. -// -// For defaulting, we interpret "ubuntu" as -// "index.docker.io/library/ubuntu:latest" because we add the missing repo -// "library", the missing registry "index.docker.io", and the missing tag -// "latest". To disable this defaulting, use the StrictValidation option. This -// is useful e.g. to only allow image references that explicitly set a tag or -// digest, so that you don't accidentally pull "latest". -package name diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/errors.go deleted file mode 100644 index 7847cc5d1e..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package name - -import "fmt" - -// ErrBadName is an error for when a bad docker name is supplied. -type ErrBadName struct { - info string -} - -func (e *ErrBadName) Error() string { - return e.info -} - -// NewErrBadName returns a ErrBadName which returns the given formatted string from Error(). -func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName { - return &ErrBadName{fmt.Sprintf(fmtStr, args...)} -} - -// IsErrBadName returns true if the given error is an ErrBadName. -func IsErrBadName(err error) bool { - _, ok := err.(*ErrBadName) - return ok -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/options.go deleted file mode 100644 index d14fedcdad..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/options.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -const ( - // DefaultRegistry is the registry name that will be used if no registry - // provided and the default is not overridden. - DefaultRegistry = "index.docker.io" - defaultRegistryAlias = "docker.io" - - // DefaultTag is the tag name that will be used if no tag provided and the - // default is not overridden. - DefaultTag = "latest" -) - -type options struct { - strict bool // weak by default - insecure bool // secure by default - defaultRegistry string - defaultTag string -} - -func makeOptions(opts ...Option) options { - opt := options{ - defaultRegistry: DefaultRegistry, - defaultTag: DefaultTag, - } - for _, o := range opts { - o(&opt) - } - return opt -} - -// Option is a functional option for name parsing. -type Option func(*options) - -// StrictValidation is an Option that requires image references to be fully -// specified; i.e. no defaulting for registry (dockerhub), repo (library), -// or tag (latest). -func StrictValidation(opts *options) { - opts.strict = true -} - -// WeakValidation is an Option that sets defaults when parsing names, see -// StrictValidation. -func WeakValidation(opts *options) { - opts.strict = false -} - -// Insecure is an Option that allows image references to be fetched without TLS. -func Insecure(opts *options) { - opts.insecure = true -} - -// OptionFn is a function that returns an option. -type OptionFn func() Option - -// WithDefaultRegistry sets the default registry that will be used if one is not -// provided. -func WithDefaultRegistry(r string) Option { - return func(opts *options) { - opts.defaultRegistry = r - } -} - -// WithDefaultTag sets the default tag that will be used if one is not provided. 
-func WithDefaultTag(t string) Option { - return func(opts *options) { - opts.defaultTag = t - } -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go deleted file mode 100644 index e5180b3d0a..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "fmt" -) - -// Reference defines the interface that consumers use when they can -// take either a tag or a digest. -type Reference interface { - fmt.Stringer - - // Context accesses the Repository context of the reference. - Context() Repository - - // Identifier accesses the type-specific portion of the reference. - Identifier() string - - // Name is the fully-qualified reference name. - Name() string - - // Scope is the scope needed to access this reference. - Scope(string) string -} - -// ParseReference parses the string as a reference, either by tag or digest. -func ParseReference(s string, opts ...Option) (Reference, error) { - if t, err := NewTag(s, opts...); err == nil { - return t, nil - } - if d, err := NewDigest(s, opts...); err == nil { - return d, nil - } - return nil, NewErrBadName("could not parse reference: " + s) - -} - -type stringConst string - -// MustParseReference behaves like ParseReference, but panics instead of -// returning an error. It's intended for use in tests, or when a value is -// expected to be valid at code authoring time. -// -// To discourage its use in scenarios where the value is not known at code -// authoring time, it must be passed a string constant: -// -// const str = "valid/string" -// MustParseReference(str) -// MustParseReference("another/valid/string") -// MustParseReference(str + "/and/more") -// -// These will not compile: -// -// var str = "valid/string" -// MustParseReference(str) -// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) -func MustParseReference(s stringConst, opts ...Option) Reference { - ref, err := ParseReference(string(s), opts...) - if err != nil { - panic(err) - } - return ref -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/registry.go deleted file mode 100644 index d4da7409e8..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "net" - "net/url" - "regexp" - "strings" -) - -// Detect more complex forms of local references. -var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) - -// Detect the loopback IP (127.0.0.1) -var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) - -// Detect the loopback IPV6 (::1) -var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) - -// Registry stores a docker registry name in a structured form. -type Registry struct { - insecure bool - registry string -} - -// RegistryStr returns the registry component of the Registry. -func (r Registry) RegistryStr() string { - return r.registry -} - -// Name returns the name from which the Registry was derived. -func (r Registry) Name() string { - return r.RegistryStr() -} - -func (r Registry) String() string { - return r.Name() -} - -// Scope returns the scope required to access the registry. -func (r Registry) Scope(string) string { - // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z - return "registry:catalog:*" -} - -func (r Registry) isRFC1918() bool { - ipStr := strings.Split(r.Name(), ":")[0] - ip := net.ParseIP(ipStr) - if ip == nil { - return false - } - for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { - _, block, _ := net.ParseCIDR(cidr) - if block.Contains(ip) { - return true - } - } - return false -} - -// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. -func (r Registry) Scheme() string { - if r.insecure { - return "http" - } - if r.isRFC1918() { - return "http" - } - if strings.HasPrefix(r.Name(), "localhost:") { - return "http" - } - if reLocal.MatchString(r.Name()) { - return "http" - } - if reLoopback.MatchString(r.Name()) { - return "http" - } - if reipv6Loopback.MatchString(r.Name()) { - return "http" - } - return "https" -} - -func checkRegistry(name string) error { - // Per RFC 3986, registries (authorities) are required to be prefixed with "//" - // url.Host == hostname[:port] == authority - if url, err := url.Parse("//" + name); err != nil || url.Host != name { - return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) - } - return nil -} - -// NewRegistry returns a Registry based on the given name. -// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. -func NewRegistry(name string, opts ...Option) (Registry, error) { - opt := makeOptions(opts...) - if opt.strict && len(name) == 0 { - return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined") - } - - if err := checkRegistry(name); err != nil { - return Registry{}, err - } - - if name == "" { - name = opt.defaultRegistry - } - // Rewrite "docker.io" to "index.docker.io". - // See: https://github.com/google/go-containerregistry/issues/68 - if name == defaultRegistryAlias { - name = DefaultRegistry - } - - return Registry{registry: name, insecure: opt.insecure}, nil -} - -// NewInsecureRegistry returns an Insecure Registry based on the given name. 
-// -// Deprecated: Use the Insecure Option with NewRegistry instead. -func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { - opts = append(opts, Insecure) - return NewRegistry(name, opts...) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go deleted file mode 100644 index 54367a15cd..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "fmt" - "strings" -) - -const ( - defaultNamespace = "library" - repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" - regRepoDelimiter = "/" -) - -// Repository stores a docker repository name in a structured form. -type Repository struct { - Registry - repository string -} - -// See https://docs.docker.com/docker-hub/official_repos -func hasImplicitNamespace(repo string, reg Registry) bool { - return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry -} - -// RepositoryStr returns the repository component of the Repository. -func (r Repository) RepositoryStr() string { - if hasImplicitNamespace(r.repository, r.Registry) { - return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) - } - return r.repository -} - -// Name returns the name from which the Repository was derived. -func (r Repository) Name() string { - regName := r.Registry.Name() - if regName != "" { - return regName + regRepoDelimiter + r.RepositoryStr() - } - // TODO: As far as I can tell, this is unreachable. - return r.RepositoryStr() -} - -func (r Repository) String() string { - return r.Name() -} - -// Scope returns the scope required to perform the given action on the registry. -// TODO(jonjohnsonjr): consider moving scopes to a separate package. -func (r Repository) Scope(action string) string { - return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) -} - -func checkRepository(repository string) error { - return checkElement("repository", repository, repositoryChars, 2, 255) -} - -// NewRepository returns a new Repository representing the given name, according to the given strictness. -func NewRepository(name string, opts ...Option) (Repository, error) { - opt := makeOptions(opts...) - if len(name) == 0 { - return Repository{}, NewErrBadName("a repository name must be specified") - } - - var registry string - repo := name - parts := strings.SplitN(name, regRepoDelimiter, 2) - if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { - // The first part of the repository is treated as the registry domain - // iff it contains a '.' or ':' character, otherwise it is all repository - // and the domain defaults to Docker Hub. 
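		// For example, "gcr.io/foo/bar" has a '.' in its first component, so the
		// registry becomes "gcr.io" and the repository "foo/bar"; a bare "ubuntu"
		// skips this branch entirely and the registry is defaulted below.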
- registry = parts[0] - repo = parts[1] - } - - if err := checkRepository(repo); err != nil { - return Repository{}, err - } - - reg, err := NewRegistry(registry, opts...) - if err != nil { - return Repository{}, err - } - if hasImplicitNamespace(repo, reg) && opt.strict { - return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')") - } - return Repository{reg, repo}, nil -} - -// Tag returns a Tag in this Repository. -func (r Repository) Tag(identifier string) Tag { - t := Tag{ - tag: identifier, - Repository: r, - } - t.original = t.Name() - return t -} - -// Digest returns a Digest in this Repository. -func (r Repository) Digest(identifier string) Digest { - d := Digest{ - digest: identifier, - Repository: r, - } - d.original = d.Name() - return d -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/tag.go deleted file mode 100644 index 66bd1bec3d..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/tag.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package name - -import ( - "strings" -) - -const ( - // TODO(dekkagaijin): use the docker/distribution regexes for validation. - tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" - tagDelim = ":" -) - -// Tag stores a docker tag name in a structured form. -type Tag struct { - Repository - tag string - original string -} - -// Ensure Tag implements Reference -var _ Reference = (*Tag)(nil) - -// Context implements Reference. -func (t Tag) Context() Repository { - return t.Repository -} - -// Identifier implements Reference. -func (t Tag) Identifier() string { - return t.TagStr() -} - -// TagStr returns the tag component of the Tag. -func (t Tag) TagStr() string { - return t.tag -} - -// Name returns the name from which the Tag was derived. -func (t Tag) Name() string { - return t.Repository.Name() + tagDelim + t.TagStr() -} - -// String returns the original input string. -func (t Tag) String() string { - return t.original -} - -// Scope returns the scope required to perform the given action on the tag. -func (t Tag) Scope(action string) string { - return t.Repository.Scope(action) -} - -func checkTag(name string) error { - return checkElement("tag", name, tagChars, 1, 128) -} - -// NewTag returns a new Tag representing the given name, according to the given strictness. -func NewTag(name string, opts ...Option) (Tag, error) { - opt := makeOptions(opts...) - base := name - tag := "" - - // Split on ":" - parts := strings.Split(name, tagDelim) - // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. 
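	// For example, in "localhost:5000/repo" the text after the last ':' contains
	// a '/', so it is a port rather than a tag; in "ubuntu:latest" it does not,
	// and "latest" is split off as the tag.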
- if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { - base = strings.Join(parts[:len(parts)-1], tagDelim) - tag = parts[len(parts)-1] - } - - // We don't require a tag, but if we get one check it's valid, - // even when not being strict. - // If we are being strict, we want to validate the tag regardless in case - // it's empty. - if tag != "" || opt.strict { - if err := checkTag(tag); err != nil { - return Tag{}, err - } - } - - if tag == "" { - tag = opt.defaultTag - } - - repo, err := NewRepository(base, opts...) - if err != nil { - return Tag{}, err - } - return Tag{ - Repository: repo, - tag: tag, - original: name, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/config.go deleted file mode 100644 index a950b397c1..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - "time" -) - -// ConfigFile is the configuration file that holds the metadata describing -// how to launch a container. See: -// https://github.com/opencontainers/image-spec/blob/master/config.md -// -// docker_version and os.version are not part of the spec but included -// for backwards compatibility. -type ConfigFile struct { - Architecture string `json:"architecture"` - Author string `json:"author,omitempty"` - Container string `json:"container,omitempty"` - Created Time `json:"created,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - History []History `json:"history,omitempty"` - OS string `json:"os"` - RootFS RootFS `json:"rootfs"` - Config Config `json:"config"` - OSVersion string `json:"os.version,omitempty"` -} - -// History is one entry of a list recording how this container image was built. -type History struct { - Author string `json:"author,omitempty"` - Created Time `json:"created,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Time is a wrapper around time.Time to help with deep copying -type Time struct { - time.Time -} - -// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time -// type is effectively immutable in the time API, so it is safe to -// copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t *Time) DeepCopyInto(out *Time) { - *out = *t -} - -// RootFS holds the ordered list of file system deltas that comprise the -// container image's root filesystem. -type RootFS struct { - Type string `json:"type"` - DiffIDs []Hash `json:"diff_ids"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. 
- // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config is a submessage of the config file described as: -// The execution parameters which SHOULD be used as a base when running -// a container using the image. -// The names of the fields in this message are chosen to reflect the JSON -// payload of the Config as defined here: -// https://git.io/vrAET -// and -// https://github.com/opencontainers/image-spec/blob/master/config.md -type Config struct { - AttachStderr bool `json:"AttachStderr,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty"` - Cmd []string `json:"Cmd,omitempty"` - Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` - Domainname string `json:"Domainname,omitempty"` - Entrypoint []string `json:"Entrypoint,omitempty"` - Env []string `json:"Env,omitempty"` - Hostname string `json:"Hostname,omitempty"` - Image string `json:"Image,omitempty"` - Labels map[string]string `json:"Labels,omitempty"` - OnBuild []string `json:"OnBuild,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty"` - Tty bool `json:"Tty,omitempty"` - User string `json:"User,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - ArgsEscaped bool `json:"ArgsEscaped,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` - StopSignal string `json:"StopSignal,omitempty"` - Shell []string `json:"Shell,omitempty"` -} - -// ParseConfigFile parses the io.Reader's contents into a ConfigFile. -func ParseConfigFile(r io.Reader) (*ConfigFile, error) { - cf := ConfigFile{} - if err := json.NewDecoder(r).Decode(&cf); err != nil { - return nil, err - } - return &cf, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go deleted file mode 100644 index 7a84736be2..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +k8s:deepcopy-gen=package - -// Package v1 defines structured types for OCI v1 images -package v1 diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go deleted file mode 100644 index e9630087e1..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - "strings" -) - -// Hash is an unqualified digest of some content, e.g. sha256:deadbeef -type Hash struct { - // Algorithm holds the algorithm used to compute the hash. - Algorithm string - - // Hex holds the hex portion of the content hash. - Hex string -} - -// String reverses NewHash returning the string-form of the hash. -func (h Hash) String() string { - return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) -} - -// NewHash validates the input string is a hash and returns a strongly type Hash object. -func NewHash(s string) (Hash, error) { - h := Hash{} - if err := h.parse(s); err != nil { - return Hash{}, err - } - return h, nil -} - -// MarshalJSON implements json.Marshaler -func (h Hash) MarshalJSON() ([]byte, error) { - return json.Marshal(h.String()) -} - -// UnmarshalJSON implements json.Unmarshaler -func (h *Hash) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { - return err - } - return h.parse(s) -} - -// MarshalText implements encoding.TextMarshaler. This is required to use -// v1.Hash as a key in a map when marshalling JSON. -func (h Hash) MarshalText() (text []byte, err error) { - return []byte(h.String()), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. This is required to use -// v1.Hash as a key in a map when unmarshalling JSON. -func (h *Hash) UnmarshalText(text []byte) error { - return h.parse(string(text)) -} - -// Hasher returns a hash.Hash for the named algorithm (e.g. 
"sha256") -func Hasher(name string) (hash.Hash, error) { - switch name { - case "sha256": - return sha256.New(), nil - default: - return nil, fmt.Errorf("unsupported hash: %q", name) - } -} - -func (h *Hash) parse(unquoted string) error { - parts := strings.Split(unquoted, ":") - if len(parts) != 2 { - return fmt.Errorf("cannot parse hash: %q", unquoted) - } - - rest := strings.TrimLeft(parts[1], "0123456789abcdef") - if len(rest) != 0 { - return fmt.Errorf("found non-hex character in hash: %c", rest[0]) - } - - hasher, err := Hasher(parts[0]) - if err != nil { - return err - } - // Compare the hex to the expected size (2 hex characters per byte) - if len(parts[1]) != hasher.Size()*2 { - return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) - } - - h.Algorithm = parts[0] - h.Hex = parts[1] - return nil -} - -// SHA256 computes the Hash of the provided io.Reader's content. -func SHA256(r io.Reader) (Hash, int64, error) { - hasher := sha256.New() - n, err := io.Copy(hasher, r) - if err != nil { - return Hash{}, 0, err - } - return Hash{ - Algorithm: "sha256", - Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), - }, n, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/image.go deleted file mode 100644 index 8de9e47645..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/image.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Image defines the interface for interacting with an OCI v1 image. -type Image interface { - // Layers returns the ordered collection of filesystem layers that comprise this image. - // The order of the list is oldest/base layer first, and most-recent/top layer last. - Layers() ([]Layer, error) - - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) - - // Size returns the size of the manifest. - Size() (int64, error) - - // ConfigName returns the hash of the image's config file, also known as - // the Image ID. - ConfigName() (Hash, error) - - // ConfigFile returns this image's config file. - ConfigFile() (*ConfigFile, error) - - // RawConfigFile returns the serialized bytes of ConfigFile(). - RawConfigFile() ([]byte, error) - - // Digest returns the sha256 of this image's manifest. - Digest() (Hash, error) - - // Manifest returns this image's Manifest object. - Manifest() (*Manifest, error) - - // RawManifest returns the serialized bytes of Manifest() - RawManifest() ([]byte, error) - - // LayerByDigest returns a Layer for interacting with a particular layer of - // the image, looking it up by "digest" (the compressed hash). - LayerByDigest(Hash) (Layer, error) - - // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" - // (the uncompressed hash). 
- LayerByDiffID(Hash) (Layer, error) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/index.go deleted file mode 100644 index 8e7bc8ebb3..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/index.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ImageIndex defines the interface for interacting with an OCI image index. -type ImageIndex interface { - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) - - // Digest returns the sha256 of this index's manifest. - Digest() (Hash, error) - - // Size returns the size of the manifest. - Size() (int64, error) - - // IndexManifest returns this image index's manifest object. - IndexManifest() (*IndexManifest, error) - - // RawManifest returns the serialized bytes of IndexManifest(). - RawManifest() ([]byte, error) - - // Image returns a v1.Image that this ImageIndex references. - Image(Hash) (Image, error) - - // ImageIndex returns a v1.ImageIndex that this ImageIndex references. - ImageIndex(Hash) (ImageIndex, error) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go deleted file mode 100644 index 57447d263d..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "io" - - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Layer is an interface for accessing the properties of a particular layer of a v1.Image -type Layer interface { - // Digest returns the Hash of the compressed layer. - Digest() (Hash, error) - - // DiffID returns the Hash of the uncompressed layer. - DiffID() (Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) - - // MediaType returns the media type of the Layer. 
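As context for the `hash.go` helpers removed just above: `v1.SHA256` digests an `io.Reader` and reports how many bytes it read, while `v1.NewHash` parses the `algorithm:hex` string form back into a structured `Hash`. A minimal illustrative sketch, not part of the vendored sources being deleted:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// Digest some content; SHA256 returns the hash and the byte count.
	h, n, err := v1.SHA256(strings.NewReader("some blob contents"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%d bytes)\n", h, n) // e.g. sha256:... (18 bytes)

	// Round-trip through the string form that NewHash validates and parses.
	parsed, err := v1.NewHash(h.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Algorithm, len(parsed.Hex)) // sha256 64
}
```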
- MediaType() (types.MediaType, error) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go deleted file mode 100644 index 51a4670405..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "encoding/json" - "io" - - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// Manifest represents the OCI image manifest in a structured way. -type Manifest struct { - SchemaVersion int64 `json:"schemaVersion"` - MediaType types.MediaType `json:"mediaType,omitempty"` - Config Descriptor `json:"config"` - Layers []Descriptor `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// IndexManifest represents an OCI image index in a structured way. -type IndexManifest struct { - SchemaVersion int64 `json:"schemaVersion"` - MediaType types.MediaType `json:"mediaType,omitempty"` - Manifests []Descriptor `json:"manifests"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// Descriptor holds a reference from the manifest to one of its constituent elements. -type Descriptor struct { - MediaType types.MediaType `json:"mediaType"` - Size int64 `json:"size"` - Digest Hash `json:"digest"` - URLs []string `json:"urls,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` - Platform *Platform `json:"platform,omitempty"` -} - -// ParseManifest parses the io.Reader's contents into a Manifest. -func ParseManifest(r io.Reader) (*Manifest, error) { - m := Manifest{} - if err := json.NewDecoder(r).Decode(&m); err != nil { - return nil, err - } - return &m, nil -} - -// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. -func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { - im := IndexManifest{} - if err := json.NewDecoder(r).Decode(&im); err != nil { - return nil, err - } - return &im, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go deleted file mode 100644 index 0f886667ad..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package match provides functionality for conveniently matching a v1.Descriptor. -package match - -import ( - v1 "github.com/google/go-containerregistry/pkg/v1" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Matcher function that is given a v1.Descriptor, and returns whether or -// not it matches a given rule. Can match on anything it wants in the Descriptor. -type Matcher func(desc v1.Descriptor) bool - -// Name returns a match.Matcher that matches based on the value of the -// "org.opencontainers.image.ref.name" annotation: -// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys -func Name(name string) Matcher { - return Annotation(imagespec.AnnotationRefName, name) -} - -// Annotation returns a match.Matcher that matches based on the provided annotation. -func Annotation(key, value string) Matcher { - return func(desc v1.Descriptor) bool { - if desc.Annotations == nil { - return false - } - if aValue, ok := desc.Annotations[key]; ok && aValue == value { - return true - } - return false - } -} - -// Platforms returns a match.Matcher that matches on any one of the provided platforms. -// Ignores any descriptors that do not have a platform. -func Platforms(platforms ...v1.Platform) Matcher { - return func(desc v1.Descriptor) bool { - if desc.Platform == nil { - return false - } - for _, platform := range platforms { - if desc.Platform.Equals(platform) { - return true - } - } - return false - } -} - -// MediaTypes returns a match.Matcher that matches at least one of the provided media types. -func MediaTypes(mediaTypes ...string) Matcher { - mts := map[string]bool{} - for _, media := range mediaTypes { - mts[media] = true - } - return func(desc v1.Descriptor) bool { - if desc.MediaType == "" { - return false - } - if _, ok := mts[string(desc.MediaType)]; ok { - return true - } - return false - } -} - -// Digests returns a match.Matcher that matches at least one of the provided Digests -func Digests(digests ...v1.Hash) Matcher { - digs := map[v1.Hash]bool{} - for _, digest := range digests { - digs[digest] = true - } - return func(desc v1.Descriptor) bool { - _, ok := digs[desc.Digest] - return ok - } -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md deleted file mode 100644 index c5710f9a0f..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# `partial` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial) - -## Partial Implementations - -There are roughly two kinds of image representations: compressed and uncompressed. - -The implementations for these kinds of images are almost identical, with the only -major difference being how blobs (config and layers) are fetched. This common -code lives in this package, where you provide a _partial_ implementation of a -compressed or uncompressed image, and you get back a full `v1.Image` implementation. - -### Examples - -In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms -of compressed layers. 
`remote.remoteImage` does this by implementing `CompressedImageCore`: - -```go -type CompressedImageCore interface { - RawConfigFile() ([]byte, error) - MediaType() (types.MediaType, error) - RawManifest() ([]byte, error) - LayerByDigest(v1.Hash) (CompressedLayer, error) -} -``` - -In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms -of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`: - -```go -type CompressedImageCore interface { - RawConfigFile() ([]byte, error) - MediaType() (types.MediaType, error) - LayerByDiffID(v1.Hash) (UncompressedLayer, error) -} -``` - -## Optional Methods - -Where possible, we access some information via optional methods as an optimization. - -### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor) - -There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data: - -* `MediaType` -* `Platform` -* `URLs` -* `Annotations` - -For example, in a `tarball.Image`, there is a `LayerSources` field that contains -an entire layer descriptor with `URLs` information for foreign layers. This -information can be passed through to callers by implementing this optional -`Descriptor` method. - -See [`#654`](https://github.com/google/go-containerregistry/pull/654). - -### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize) - -Usually, you don't need to know the uncompressed size of a layer, since that -information isn't stored in a config file (just he sha256 is needed); however, -there are cases where it is very helpful to know the layer size, e.g. when -writing the uncompressed layer into a tarball. - -See [`#655`](https://github.com/google/go-containerregistry/pull/655). - -### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists) - -We generally don't care about the existence of something as granular as a -layer, and would rather ensure all the invariants of an image are upheld via -the `validate` package. However, there are situations where we want to do a -quick smoke test to ensure that the underlying storage engine hasn't been -corrupted by something e.g. deleting files or blobs. Thus, we've exposed an -optional `Exists` method that does an existence check without actually reading -any bytes. - -The `remote` package implements this via `HEAD` requests. - -The `layout` package implements this via `os.Stat`. - -See [`#838`](https://github.com/google/go-containerregistry/pull/838). diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go deleted file mode 100644 index 2e6e548ac9..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "io" - - "github.com/google/go-containerregistry/internal/gzip" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// CompressedLayer represents the bare minimum interface a natively -// compressed layer must implement for us to produce a v1.Layer -type CompressedLayer interface { - // Digest returns the Hash of the compressed layer. - Digest() (v1.Hash, error) - - // Compressed returns an io.ReadCloser for the compressed layer contents. - Compressed() (io.ReadCloser, error) - - // Size returns the compressed size of the Layer. - Size() (int64, error) - - // Returns the mediaType for the compressed Layer - MediaType() (types.MediaType, error) -} - -// compressedLayerExtender implements v1.Image using the compressed base properties. -type compressedLayerExtender struct { - CompressedLayer -} - -// Uncompressed implements v1.Layer -func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { - r, err := cle.Compressed() - if err != nil { - return nil, err - } - return gzip.UnzipReadCloser(r) -} - -// DiffID implements v1.Layer -func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) { - // If our nested CompressedLayer implements DiffID, - // then delegate to it instead. - if wdi, ok := cle.CompressedLayer.(WithDiffID); ok { - return wdi.DiffID() - } - r, err := cle.Uncompressed() - if err != nil { - return v1.Hash{}, err - } - defer r.Close() - h, _, err := v1.SHA256(r) - return h, err -} - -// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer -func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { - return &compressedLayerExtender{ul}, nil -} - -// CompressedImageCore represents the base minimum interface a natively -// compressed image must implement for us to produce a v1.Image. -type CompressedImageCore interface { - ImageCore - - // RawManifest returns the serialized bytes of the manifest. - RawManifest() ([]byte, error) - - // LayerByDigest is a variation on the v1.Image method, which returns - // a CompressedLayer instead. - LayerByDigest(v1.Hash) (CompressedLayer, error) -} - -// compressedImageExtender implements v1.Image by extending CompressedImageCore with the -// appropriate methods computed from the minimal core. 
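The `CompressedLayer` interface above is the compressed-side entry point described in the `partial` README earlier in this patch: provide `Digest`, `Compressed`, `Size`, and `MediaType`, and `partial.CompressedToLayer` derives the rest (notably `DiffID`, by decompressing and re-hashing). A hedged sketch using a hypothetical in-memory `staticLayer`; the layer media type string is inlined rather than pulled from the `types` package:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

// staticLayer is a hypothetical CompressedLayer backed by pre-gzipped bytes.
type staticLayer struct {
	gzipped []byte
}

func (l *staticLayer) Digest() (v1.Hash, error) {
	h, _, err := v1.SHA256(bytes.NewReader(l.gzipped))
	return h, err
}

func (l *staticLayer) Compressed() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewReader(l.gzipped)), nil
}

func (l *staticLayer) Size() (int64, error) { return int64(len(l.gzipped)), nil }

func (l *staticLayer) MediaType() (types.MediaType, error) {
	return types.MediaType("application/vnd.oci.image.layer.v1.tar+gzip"), nil
}

func main() {
	// Pretend payload; a real layer would be a gzipped tarball.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("pretend this is a tarball")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	layer, err := partial.CompressedToLayer(&staticLayer{gzipped: buf.Bytes()})
	if err != nil {
		panic(err)
	}
	digest, _ := layer.Digest() // hash of the compressed bytes, from staticLayer
	diffID, _ := layer.DiffID() // derived: gunzip Compressed() and hash the result
	fmt.Println(digest, diffID)
}
```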
-type compressedImageExtender struct { - CompressedImageCore -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*compressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *compressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// ConfigName implements v1.Image -func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// Layers implements v1.Image -func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { - hs, err := FSLayers(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(hs)) - for _, h := range hs { - l, err := i.LayerByDigest(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDigest implements v1.Image -func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - cl, err := i.CompressedImageCore.LayerByDigest(h) - if err != nil { - return nil, err - } - return CompressedToLayer(cl) -} - -// LayerByDiffID implements v1.Image -func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { - h, err := DiffIDToBlob(i, h) - if err != nil { - return nil, err - } - return i.LayerByDigest(h) -} - -// ConfigFile implements v1.Image -func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Manifest implements v1.Image -func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { - return Manifest(i) -} - -// Size implements v1.Image -func (i *compressedImageExtender) Size() (int64, error) { - return Size(i) -} - -// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image -func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { - return &compressedImageExtender{ - CompressedImageCore: cic, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go deleted file mode 100644 index 153dfe4d53..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package partial defines methods for building up a v1.Image from -// minimal subsets that are sufficient for defining a v1.Image. -package partial diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go deleted file mode 100644 index c65f45e0dc..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ImageCore is the core set of properties without which we cannot build a v1.Image -type ImageCore interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) - - // MediaType of this image's manifest. - MediaType() (types.MediaType, error) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go deleted file mode 100644 index 9c7a92485b..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "fmt" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/match" -) - -// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher. -func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) { - // get the actual manifest list - indexManifest, err := index.IndexManifest() - if err != nil { - return nil, fmt.Errorf("unable to get raw index: %v", err) - } - manifests := []v1.Descriptor{} - // try to get the root of our image - for _, manifest := range indexManifest.Manifests { - if matcher(manifest) { - manifests = append(manifests, manifest) - } - } - return manifests, nil -} - -// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor -// matches the provider Matcher, but the referenced item is not an Image, ignores it. -// Only returns those that match the Matcher and are images. -func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) { - matches := []v1.Image{} - manifests, err := FindManifests(index, matcher) - if err != nil { - return nil, err - } - for _, desc := range manifests { - // if it is not an image, ignore it - if !desc.MediaType.IsImage() { - continue - } - img, err := index.Image(desc.Digest) - if err != nil { - return nil, err - } - matches = append(matches, img) - } - return matches, nil -} - -// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor -// matches the provider Matcher, but the referenced item is not an Index, ignores it. -// Only returns those that match the Matcher and are indexes. 
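`FindManifests` and `FindImages` above pair with the `match` package removed earlier in this patch: a `match.Matcher` is simply a predicate over `v1.Descriptor`. A small illustrative sketch, assuming the caller already has a `v1.ImageIndex` from elsewhere (remote, layout, tarball); the package and function names here are hypothetical:

```go
package indexutil

import (
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
	"github.com/google/go-containerregistry/pkg/v1/partial"
)

// ImagesForPlatform returns the child images of idx whose descriptors declare
// the given platform (e.g. linux/amd64). Non-image entries are skipped by
// FindImages itself.
func ImagesForPlatform(idx v1.ImageIndex, p v1.Platform) ([]v1.Image, error) {
	return partial.FindImages(idx, match.Platforms(p))
}

// DescriptorsByAnnotation returns the descriptors in idx carrying the given
// annotation key/value pair, without resolving them to images or indexes.
func DescriptorsByAnnotation(idx v1.ImageIndex, key, value string) ([]v1.Descriptor, error) {
	return partial.FindManifests(idx, match.Annotation(key, value))
}
```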
-func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) { - matches := []v1.ImageIndex{} - manifests, err := FindManifests(index, matcher) - if err != nil { - return nil, err - } - for _, desc := range manifests { - if !desc.MediaType.IsIndex() { - continue - } - // if it is not an index, ignore it - idx, err := index.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - matches = append(matches, idx) - } - return matches, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go deleted file mode 100644 index df20d3aa9e..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package partial - -import ( - "bytes" - "io" - "sync" - - "github.com/google/go-containerregistry/internal/gzip" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// UncompressedLayer represents the bare minimum interface a natively -// uncompressed layer must implement for us to produce a v1.Layer -type UncompressedLayer interface { - // DiffID returns the Hash of the uncompressed layer. - DiffID() (v1.Hash, error) - - // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. - Uncompressed() (io.ReadCloser, error) - - // Returns the mediaType for the compressed Layer - MediaType() (types.MediaType, error) -} - -// uncompressedLayerExtender implements v1.Image using the uncompressed base properties. -type uncompressedLayerExtender struct { - UncompressedLayer - // Memoize size/hash so that the methods aren't twice as - // expensive as doing this manually. 
- hash v1.Hash - size int64 - hashSizeError error - once sync.Once -} - -// Compressed implements v1.Layer -func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { - u, err := ule.Uncompressed() - if err != nil { - return nil, err - } - return gzip.ReadCloser(u), nil -} - -// Digest implements v1.Layer -func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { - ule.calcSizeHash() - return ule.hash, ule.hashSizeError -} - -// Size implements v1.Layer -func (ule *uncompressedLayerExtender) Size() (int64, error) { - ule.calcSizeHash() - return ule.size, ule.hashSizeError -} - -func (ule *uncompressedLayerExtender) calcSizeHash() { - ule.once.Do(func() { - var r io.ReadCloser - r, ule.hashSizeError = ule.Compressed() - if ule.hashSizeError != nil { - return - } - defer r.Close() - ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) - }) -} - -// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer -func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { - return &uncompressedLayerExtender{UncompressedLayer: ul}, nil -} - -// UncompressedImageCore represents the bare minimum interface a natively -// uncompressed image must implement for us to produce a v1.Image -type UncompressedImageCore interface { - ImageCore - - // LayerByDiffID is a variation on the v1.Image method, which returns - // an UncompressedLayer instead. - LayerByDiffID(v1.Hash) (UncompressedLayer, error) -} - -// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. -func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { - return &uncompressedImageExtender{ - UncompressedImageCore: uic, - }, nil -} - -// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the -// appropriate methods computed from the minimal core. 
-type uncompressedImageExtender struct { - UncompressedImageCore - - lock sync.Mutex - manifest *v1.Manifest -} - -// Assert that our extender type completes the v1.Image interface -var _ v1.Image = (*uncompressedImageExtender)(nil) - -// Digest implements v1.Image -func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { - return Digest(i) -} - -// Manifest implements v1.Image -func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { - i.lock.Lock() - defer i.lock.Unlock() - if i.manifest != nil { - return i.manifest, nil - } - - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - - cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, err - } - - m := &v1.Manifest{ - SchemaVersion: 2, - MediaType: types.DockerManifestSchema2, - Config: v1.Descriptor{ - MediaType: types.DockerConfigJSON, - Size: cfgSize, - Digest: cfgHash, - }, - } - - ls, err := i.Layers() - if err != nil { - return nil, err - } - - m.Layers = make([]v1.Descriptor, len(ls)) - for i, l := range ls { - desc, err := Descriptor(l) - if err != nil { - return nil, err - } - - m.Layers[i] = *desc - } - - i.manifest = m - return i.manifest, nil -} - -// RawManifest implements v1.Image -func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { - return RawManifest(i) -} - -// Size implements v1.Image -func (i *uncompressedImageExtender) Size() (int64, error) { - return Size(i) -} - -// ConfigName implements v1.Image -func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { - return ConfigName(i) -} - -// ConfigFile implements v1.Image -func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { - return ConfigFile(i) -} - -// Layers implements v1.Image -func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { - diffIDs, err := DiffIDs(i) - if err != nil { - return nil, err - } - ls := make([]v1.Layer, 0, len(diffIDs)) - for _, h := range diffIDs { - l, err := i.LayerByDiffID(h) - if err != nil { - return nil, err - } - ls = append(ls, l) - } - return ls, nil -} - -// LayerByDiffID implements v1.Image -func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { - ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) - if err != nil { - return nil, err - } - return UncompressedToLayer(ul) -} - -// LayerByDigest implements v1.Image -func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - diffID, err := BlobToDiffID(i, h) - if err != nil { - return nil, err - } - return i.LayerByDiffID(diffID) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go deleted file mode 100644 index 3a5c615722..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
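`uncompressedImageExtender` above is the mirror of the compressed case: given only the uncompressed side, the compressed properties are produced by gzipping on the fly and memoized. A hedged sketch with a hypothetical in-memory `memLayer`; per the vendored interface comment, `MediaType` reports the compressed layer's media type:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

// memLayer is a hypothetical UncompressedLayer over in-memory bytes.
type memLayer struct{ content []byte }

func (l *memLayer) DiffID() (v1.Hash, error) {
	h, _, err := v1.SHA256(bytes.NewReader(l.content))
	return h, err
}

func (l *memLayer) Uncompressed() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewReader(l.content)), nil
}

func (l *memLayer) MediaType() (types.MediaType, error) {
	return types.MediaType("application/vnd.oci.image.layer.v1.tar+gzip"), nil
}

func main() {
	layer, err := partial.UncompressedToLayer(&memLayer{content: []byte("pretend tar bytes")})
	if err != nil {
		panic(err)
	}
	// Digest and Size describe the gzipped form; both are computed once and cached.
	d, _ := layer.Digest()
	s, _ := layer.Size()
	fmt.Println(d, s)
}
```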
- -package partial - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// WithRawConfigFile defines the subset of v1.Image used by these helper methods -type WithRawConfigFile interface { - // RawConfigFile returns the serialized bytes of this image's config file. - RawConfigFile() ([]byte, error) -} - -// ConfigFile is a helper for implementing v1.Image -func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) { - b, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return v1.ParseConfigFile(bytes.NewReader(b)) -} - -// ConfigName is a helper for implementing v1.Image -func ConfigName(i WithRawConfigFile) (v1.Hash, error) { - b, err := i.RawConfigFile() - if err != nil { - return v1.Hash{}, err - } - h, _, err := v1.SHA256(bytes.NewReader(b)) - return h, err -} - -type configLayer struct { - hash v1.Hash - content []byte -} - -// Digest implements v1.Layer -func (cl *configLayer) Digest() (v1.Hash, error) { - return cl.hash, nil -} - -// DiffID implements v1.Layer -func (cl *configLayer) DiffID() (v1.Hash, error) { - return cl.hash, nil -} - -// Uncompressed implements v1.Layer -func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Compressed implements v1.Layer -func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil -} - -// Size implements v1.Layer -func (cl *configLayer) Size() (int64, error) { - return int64(len(cl.content)), nil -} - -func (cl *configLayer) MediaType() (types.MediaType, error) { - // Defaulting this to OCIConfigJSON as it should remain - // backwards compatible with DockerConfigJSON - return types.OCIConfigJSON, nil -} - -var _ v1.Layer = (*configLayer)(nil) - -// ConfigLayer implements v1.Layer from the raw config bytes. -// This is so that clients (e.g. remote) can access the config as a blob. -func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { - h, err := ConfigName(i) - if err != nil { - return nil, err - } - rcfg, err := i.RawConfigFile() - if err != nil { - return nil, err - } - return &configLayer{ - hash: h, - content: rcfg, - }, nil -} - -// WithConfigFile defines the subset of v1.Image used by these helper methods -type WithConfigFile interface { - // ConfigFile returns this image's config file. - ConfigFile() (*v1.ConfigFile, error) -} - -// DiffIDs is a helper for implementing v1.Image -func DiffIDs(i WithConfigFile) ([]v1.Hash, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return cfg.RootFS.DiffIDs, nil -} - -// RawConfigFile is a helper for implementing v1.Image -func RawConfigFile(i WithConfigFile) ([]byte, error) { - cfg, err := i.ConfigFile() - if err != nil { - return nil, err - } - return json.Marshal(cfg) -} - -// WithRawManifest defines the subset of v1.Image used by these helper methods -type WithRawManifest interface { - // RawManifest returns the serialized bytes of this image's config file. 
- RawManifest() ([]byte, error) -} - -// Digest is a helper for implementing v1.Image -func Digest(i WithRawManifest) (v1.Hash, error) { - mb, err := i.RawManifest() - if err != nil { - return v1.Hash{}, err - } - digest, _, err := v1.SHA256(bytes.NewReader(mb)) - return digest, err -} - -// Manifest is a helper for implementing v1.Image -func Manifest(i WithRawManifest) (*v1.Manifest, error) { - b, err := i.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseManifest(bytes.NewReader(b)) -} - -// WithManifest defines the subset of v1.Image used by these helper methods -type WithManifest interface { - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// RawManifest is a helper for implementing v1.Image -func RawManifest(i WithManifest) ([]byte, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -// Size is a helper for implementing v1.Image -func Size(i WithRawManifest) (int64, error) { - b, err := i.RawManifest() - if err != nil { - return -1, err - } - return int64(len(b)), nil -} - -// FSLayers is a helper for implementing v1.Image -func FSLayers(i WithManifest) ([]v1.Hash, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - fsl := make([]v1.Hash, len(m.Layers)) - for i, l := range m.Layers { - fsl[i] = l.Digest - } - return fsl, nil -} - -// BlobSize is a helper for implementing v1.Image -func BlobSize(i WithManifest, h v1.Hash) (int64, error) { - d, err := BlobDescriptor(i, h) - if err != nil { - return -1, err - } - return d.Size, nil -} - -// BlobDescriptor is a helper for implementing v1.Image -func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - - if m.Config.Digest == h { - return &m.Config, nil - } - - for _, l := range m.Layers { - if l.Digest == h { - return &l, nil - } - } - return nil, fmt.Errorf("blob %v not found", h) -} - -// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods -type WithManifestAndConfigFile interface { - WithConfigFile - - // Manifest returns this image's Manifest object. - Manifest() (*v1.Manifest, error) -} - -// BlobToDiffID is a helper for mapping between compressed -// and uncompressed blob hashes. -func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(i) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(i) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, blob := range blobs { - if blob == h { - return diffIDs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown blob %v", h) -} - -// DiffIDToBlob is a helper for mapping between uncompressed -// and compressed blob hashes. -func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { - blobs, err := FSLayers(wm) - if err != nil { - return v1.Hash{}, err - } - diffIDs, err := DiffIDs(wm) - if err != nil { - return v1.Hash{}, err - } - if len(blobs) != len(diffIDs) { - return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) - } - for i, diffID := range diffIDs { - if diffID == h { - return blobs[i], nil - } - } - return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) -} - -// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
-type WithDiffID interface { - DiffID() (v1.Hash, error) -} - -// withDescriptor allows partial layer implementations to provide a layer -// descriptor to the partial image manifest builder. This allows partial -// uncompressed layers to provide foreign layer metadata like URLs to the -// uncompressed image manifest. -type withDescriptor interface { - Descriptor() (*v1.Descriptor, error) -} - -// Describable represents something for which we can produce a v1.Descriptor. -type Describable interface { - Digest() (v1.Hash, error) - MediaType() (types.MediaType, error) - Size() (int64, error) -} - -// Descriptor returns a v1.Descriptor given a Describable. It also encodes -// some logic for unwrapping things that have been wrapped by -// CompressedToLayer, UncompressedToLayer, CompressedToImage, or -// UncompressedToImage. -func Descriptor(d Describable) (*v1.Descriptor, error) { - // If Describable implements Descriptor itself, return that. - if wd, ok := unwrap(d).(withDescriptor); ok { - return wd.Descriptor() - } - - // If all else fails, compute the descriptor from the individual methods. - var ( - desc v1.Descriptor - err error - ) - - if desc.Size, err = d.Size(); err != nil { - return nil, err - } - if desc.Digest, err = d.Digest(); err != nil { - return nil, err - } - if desc.MediaType, err = d.MediaType(); err != nil { - return nil, err - } - - return &desc, nil -} - -type withUncompressedSize interface { - UncompressedSize() (int64, error) -} - -// UncompressedSize returns the size of the Uncompressed layer. If the -// underlying implementation doesn't implement UncompressedSize directly, -// this will compute the uncompressedSize by reading everything returned -// by Compressed(). This is potentially expensive and may consume the contents -// for streaming layers. -func UncompressedSize(l v1.Layer) (int64, error) { - // If the layer implements UncompressedSize itself, return that. - if wus, ok := unwrap(l).(withUncompressedSize); ok { - return wus.UncompressedSize() - } - - // The layer doesn't implement UncompressedSize, we need to compute it. - rc, err := l.Uncompressed() - if err != nil { - return -1, err - } - defer rc.Close() - - return io.Copy(ioutil.Discard, rc) -} - -type withExists interface { - Exists() (bool, error) -} - -// Exists checks to see if a layer exists. This is a hack to work around the -// mistakes of the partial package. Don't use this. -func Exists(l v1.Layer) (bool, error) { - // If the layer implements Exists itself, return that. - if we, ok := unwrap(l).(withExists); ok { - return we.Exists() - } - - // The layer doesn't implement Exists, so we hope that calling Compressed() - // is enough to trigger an error if the layer does not exist. - rc, err := l.Compressed() - if err != nil { - return false, err - } - defer rc.Close() - - // We may want to try actually reading a single byte, but if we need to do - // that, we should just fix this hack. - return true, nil -} - -// Recursively unwrap our wrappers so that we can check for the original implementation. -// We might want to expose this? 
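Most helpers in `with.go` above deliberately accept a narrow slice of `v1.Image`; `partial.Digest`, `partial.Size`, and `partial.Manifest`, for instance, need nothing but a `RawManifest` method. An illustrative sketch (the `rawManifest` type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/partial"
)

// rawManifest satisfies partial.WithRawManifest with nothing but bytes.
type rawManifest struct{ b []byte }

func (r rawManifest) RawManifest() ([]byte, error) { return r.b, nil }

func main() {
	m := rawManifest{b: []byte(`{"schemaVersion":2,"layers":[]}`)}

	digest, err := partial.Digest(m) // sha256 of the serialized manifest
	if err != nil {
		panic(err)
	}
	size, err := partial.Size(m) // length of the serialized manifest
	if err != nil {
		panic(err)
	}
	parsed, err := partial.Manifest(m) // structured v1.Manifest
	if err != nil {
		panic(err)
	}
	fmt.Println(digest, size, parsed.SchemaVersion)
}
```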
-func unwrap(i interface{}) interface{} { - if ule, ok := i.(*uncompressedLayerExtender); ok { - return unwrap(ule.UncompressedLayer) - } - if cle, ok := i.(*compressedLayerExtender); ok { - return unwrap(cle.CompressedLayer) - } - if uie, ok := i.(*uncompressedImageExtender); ok { - return unwrap(uie.UncompressedImageCore) - } - if cie, ok := i.(*compressedImageExtender); ok { - return unwrap(cie.CompressedImageCore) - } - return i -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go deleted file mode 100644 index a586ab3675..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "sort" -) - -// Platform represents the target os/arch for an image. -type Platform struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` -} - -// Equals returns true if the given platform is semantically equivalent to this one. -// The order of Features and OSFeatures is not important. -func (p Platform) Equals(o Platform) bool { - return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion && - stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features) -} - -// stringSliceEqual compares 2 string slices and returns if their contents are identical. -func stringSliceEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, elm := range a { - if elm != b[i] { - return false - } - } - return true -} - -// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order -func stringSliceEqualIgnoreOrder(a, b []string) bool { - a1, b1 := a[:], b[:] - if a1 != nil && b1 != nil { - sort.Strings(a1) - sort.Strings(b1) - } - return stringSliceEqual(a1, b1) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go deleted file mode 100644 index 844f04d937..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Update representation of an update of transfer progress. Some functions -// in this module can take a channel to which updates will be sent while a -// transfer is in progress. -// +k8s:deepcopy-gen=false -type Update struct { - Total int64 - Complete int64 - Error error -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md deleted file mode 100644 index c1e81b310b..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# `remote` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) - -The `remote` package implements a client for accessing a registry, -per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). - -It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the -authentication handshake and structured errors. - -## Usage - -```go -package main - -import ( - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" -) - -func main() { - ref, err := name.ParseReference("gcr.io/google-containers/pause") - if err != nil { - panic(err) - } - - img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) - if err != nil { - panic(err) - } - - // do stuff with img -} -``` - -## Structure - -

- - -## Background - -There are a lot of confusingly similar terms that come up when talking about images in registries. - -### Anatomy of an image - -In general... - -* A tag refers to an image manifest. -* An image manifest references a config file and an orderered list of _compressed_ layers by sha256 digest. -* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration. -* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image. - -For example, an image with two layers would look something like this: - -![image anatomy](/images/image-anatomy.dot.svg) - -### Anatomy of an index - -In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image. -This was the original use case for a [manifest -list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list). - -![image index anatomy](/images/index-anatomy.dot.svg) - -It is possible for an index to reference another index, per the OCI -[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix). -In theory, both an image and image index can reference arbitrary things via -[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md), -e.g. see the [image layout -example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example), -which references an application/xml file from an image index. - -That could look something like this: - -![strange image index anatomy](/images/index-anatomy-strange.dot.svg) - -Using a recursive index like this might not be possible with all registries, -but this flexibility allows for some interesting applications, e.g. the -[OCI Artifacts](https://github.com/opencontainers/artifacts) effort. - -### Anatomy of an image upload - -The structure of an image requires a delicate ordering when uploading an image to a registry. -Below is a (slightly simplified) figure that describes how an image is prepared for upload -to a registry and how the data flows between various artifacts: - -![upload](/images/upload.dot.svg) - -Note that: - -* A config file references the uncompressed layer contents by sha256. -* A manifest references the compressed layer contents by sha256 and the size of the layer. -* A manifest references the config file contents by sha256 and the size of the file. - -It follows that during an upload, we need to upload layers before the config file, -and we need to upload the config file before the manifest. - -Sometimes, we know all of this information ahead of time, (e.g. when copying from remote.Image), -so the ordering is less important. - -In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer), -we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering. - -## Caveats - -### schema 1 - -This package does not support schema 1 images, see [`#377`](https://github.com/google/go-containerregistry/issues/377), -however, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get), -which doesn't try to interpret what is returned by the registry. 
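The anatomy notes above correspond directly to the structured types in `manifest.go` removed earlier in this patch: a manifest names its config file and its compressed layers by digest and size. A quick illustrative parse; the digests below are synthetic placeholders, not real content hashes:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// Synthetic digest placeholder; real manifests carry sha256 sums of the
	// actual config blob and compressed layer blobs.
	fake := "sha256:" + strings.Repeat("a", 64)
	manifestJSON := fmt.Sprintf(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.oci.image.manifest.v1+json",
	  "config": {"mediaType": "application/vnd.oci.image.config.v1+json", "size": 123, "digest": %q},
	  "layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 456, "digest": %q}]
	}`, fake, fake)

	m, err := v1.ParseManifest(strings.NewReader(manifestJSON))
	if err != nil {
		panic(err)
	}
	fmt.Println("config:", m.Config.Digest, m.Config.Size)
	for _, l := range m.Layers {
		fmt.Println("layer:", l.Digest, l.Size, l.MediaType)
	}
}
```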
- -[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, -see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go deleted file mode 100644 index 21b5dbbaaa..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -type catalog struct { - Repos []string `json:"repositories"` -} - -// CatalogPage calls /_catalog, returning the list of repositories on the registry. -func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { - o, err := makeOptions(target, options...) - if err != nil { - return nil, err - } - - scopes := []string{target.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) - if err != nil { - return nil, err - } - - query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n) - - uri := url.URL{ - Scheme: target.Scheme(), - Host: target.RegistryStr(), - Path: "/v2/_catalog", - RawQuery: query, - } - - client := http.Client{Transport: tr} - req, err := http.NewRequest(http.MethodGet, uri.String(), nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req.WithContext(o.context)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - var parsed catalog - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - return parsed.Repos, nil -} - -// Catalog calls /_catalog, returning the list of repositories on the registry. -func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { - o, err := makeOptions(target, options...) - if err != nil { - return nil, err - } - - scopes := []string{target.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) - if err != nil { - return nil, err - } - - uri := &url.URL{ - Scheme: target.Scheme(), - Host: target.RegistryStr(), - Path: "/v2/_catalog", - RawQuery: "n=10000", - } - - client := http.Client{Transport: tr} - - // WithContext overrides the ctx passed directly. 
- if o.context != context.Background() { - ctx = o.context - } - - var ( - parsed catalog - repoList []string - ) - - // get responses until there is no next page - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - req, err := http.NewRequest("GET", uri.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - if err := resp.Body.Close(); err != nil { - return nil, err - } - - repoList = append(repoList, parsed.Repos...) - - uri, err = getNextPageURL(resp) - if err != nil { - return nil, err - } - // no next page - if uri == nil { - break - } - } - return repoList, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go deleted file mode 100644 index c841cc0580..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go +++ /dev/null @@ -1,59 +0,0 @@ -package remote - -import ( - "context" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// CheckPushPermission returns an error if the given keychain cannot authorize -// a push operation to the given ref. -// -// This can be useful to check whether the caller has permission to push an -// image before doing work to construct the image. -// -// TODO(#412): Remove the need for this method. -func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { - auth, err := kc.Resolve(ref.Context().Registry) - if err != nil { - return fmt.Errorf("resolving authorization for %v failed: %v", ref.Context().Registry, err) - } - - scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.New(ref.Context().Registry, auth, t, scopes) - if err != nil { - return fmt.Errorf("creating push check transport for %v failed: %v", ref.Context().Registry, err) - } - // TODO(jasonhall): Against GCR, just doing the token handshake is - // enough, but this doesn't extend to Dockerhub - // (https://github.com/docker/hub-feedback/issues/1771), so we actually - // need to initiate an upload to tell whether the credentials can - // authorize a push. Figure out how to return early here when we can, - // to avoid a roundtrip for spec-compliant registries. - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: context.Background(), - } - loc, _, err := w.initiateUpload("", "") - if loc != "" { - // Since we're only initiating the upload to check whether we - // can, we should attempt to cancel it, in case initiating - // reserves some resources on the server. We shouldn't wait for - // cancelling to complete, and we don't care if it fails. 
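`Catalog` above pages through `/v2/_catalog` until the registry stops returning a next-page link. Typical use is a single call against a `name.Registry`; `registry.example.com` below is a placeholder, and the registry must actually expose the (optional) catalog endpoint:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com") // placeholder registry
	if err != nil {
		panic(err)
	}
	repos, err := remote.Catalog(context.Background(), reg,
		remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		panic(err)
	}
	for _, repo := range repos {
		fmt.Println(repo)
	}
}
```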
- go w.cancelUpload(loc) - } - return err -} - -func (w *writer) cancelUpload(loc string) { - req, err := http.NewRequest(http.MethodDelete, loc, nil) - if err != nil { - return - } - _, _ = w.client.Do(req) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go deleted file mode 100644 index 3b9022719c..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "net/http" - "net/url" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// Delete removes the specified image reference from the remote registry. -func Delete(ref name.Reference, options ...Option) error { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - scopes := []string{ref.Scope(transport.DeleteScope)} - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - c := &http.Client{Transport: tr} - - u := url.URL{ - Scheme: ref.Context().Registry.Scheme(), - Host: ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), - } - - req, err := http.NewRequest(http.MethodDelete, u.String(), nil) - if err != nil { - return err - } - - resp, err := c.Do(req.WithContext(o.context)) - if err != nil { - return err - } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go deleted file mode 100644 index a13f01b68e..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
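`Delete` above issues `DELETE /v2/<repo>/manifests/<identifier>` and treats 200 or 202 as success. A hedged usage sketch; the reference is a placeholder, and the target registry must be configured to allow deletion (many are not):

```go
package main

import (
	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Deleting by tag; deleting by digest works the same way on the client side
	// (registry.example.com/team/app@sha256:...).
	ref, err := name.ParseReference("registry.example.com/team/app:old")
	if err != nil {
		panic(err)
	}
	if err := remote.Delete(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {
		panic(err)
	}
}
```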
- -package remote - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// ErrSchema1 indicates that we received a schema1 manifest from the registry. -// This library doesn't have plans to support this legacy image format: -// https://github.com/google/go-containerregistry/issues/377 -type ErrSchema1 struct { - schema string -} - -// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType. -func newErrSchema1(schema types.MediaType) error { - return &ErrSchema1{ - schema: string(schema), - } -} - -// Error implements error. -func (e *ErrSchema1) Error() string { - return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema) -} - -// Descriptor provides access to metadata about remote artifact and accessors -// for efficiently converting it into a v1.Image or v1.ImageIndex. -type Descriptor struct { - fetcher - v1.Descriptor - Manifest []byte - - // So we can share this implementation with Image.. - platform v1.Platform -} - -// RawManifest exists to satisfy the Taggable interface. -func (d *Descriptor) RawManifest() ([]byte, error) { - return d.Manifest, nil -} - -// Get returns a remote.Descriptor for the given reference. The response from -// the registry is left un-interpreted, for the most part. This is useful for -// querying what kind of artifact a reference represents. -// -// See Head if you don't need the response body. -func Get(ref name.Reference, options ...Option) (*Descriptor, error) { - acceptable := []types.MediaType{ - // Just to look at them. - types.DockerManifestSchema1, - types.DockerManifestSchema1Signed, - } - acceptable = append(acceptable, acceptableImageMediaTypes...) - acceptable = append(acceptable, acceptableIndexMediaTypes...) - return get(ref, acceptable, options...) -} - -// Head returns a v1.Descriptor for the given reference by issuing a HEAD -// request. -// -// Note that the server response will not have a body, so any errors encountered -// should be retried with Get to get more details. -func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) { - acceptable := []types.MediaType{ - // Just to look at them. - types.DockerManifestSchema1, - types.DockerManifestSchema1Signed, - } - acceptable = append(acceptable, acceptableImageMediaTypes...) - acceptable = append(acceptable, acceptableIndexMediaTypes...) - - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return nil, err - } - - f, err := makeFetcher(ref, o) - if err != nil { - return nil, err - } - - return f.headManifest(ref, acceptable) -} - -// Handle options and fetch the manifest with the acceptable MediaTypes in the -// Accept header. -func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) { - o, err := makeOptions(ref.Context(), options...) 
- if err != nil { - return nil, err - } - f, err := makeFetcher(ref, o) - if err != nil { - return nil, err - } - b, desc, err := f.fetchManifest(ref, acceptable) - if err != nil { - return nil, err - } - return &Descriptor{ - fetcher: *f, - Manifest: b, - Descriptor: *desc, - platform: o.platform, - }, nil -} - -// Image converts the Descriptor into a v1.Image. -// -// If the fetched artifact is already an image, it will just return it. -// -// If the fetched artifact is an index, it will attempt to resolve the index to -// a child image with the appropriate platform. -// -// See WithPlatform to set the desired platform. -func (d *Descriptor) Image() (v1.Image, error) { - switch d.MediaType { - case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: - // We don't care to support schema 1 images: - // https://github.com/google/go-containerregistry/issues/377 - return nil, newErrSchema1(d.MediaType) - case types.OCIImageIndex, types.DockerManifestList: - // We want an image but the registry has an index, resolve it to an image. - return d.remoteIndex().imageByPlatform(d.platform) - case types.OCIManifestSchema1, types.DockerManifestSchema2: - // These are expected. Enumerated here to allow a default case. - default: - // We could just return an error here, but some registries (e.g. static - // registries) don't set the Content-Type headers correctly, so instead... - logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType) - } - - // Wrap the v1.Layers returned by this v1.Image in a hint for downstream - // remote.Write calls to facilitate cross-repo "mounting". - imgCore, err := partial.CompressedToImage(d.remoteImage()) - if err != nil { - return nil, err - } - return &mountableImage{ - Image: imgCore, - Reference: d.Ref, - }, nil -} - -// ImageIndex converts the Descriptor into a v1.ImageIndex. -func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { - switch d.MediaType { - case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: - // We don't care to support schema 1 images: - // https://github.com/google/go-containerregistry/issues/377 - return nil, newErrSchema1(d.MediaType) - case types.OCIManifestSchema1, types.DockerManifestSchema2: - // We want an index but the registry has an image, nothing we can do. - return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType) - case types.OCIImageIndex, types.DockerManifestList: - // These are expected. - default: - // We could just return an error here, but some registries (e.g. static - // registries) don't set the Content-Type headers correctly, so instead... - logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType) - } - return d.remoteIndex(), nil -} - -func (d *Descriptor) remoteImage() *remoteImage { - return &remoteImage{ - fetcher: d.fetcher, - manifest: d.Manifest, - mediaType: d.MediaType, - descriptor: &d.Descriptor, - } -} - -func (d *Descriptor) remoteIndex() *remoteIndex { - return &remoteIndex{ - fetcher: d.fetcher, - manifest: d.Manifest, - mediaType: d.MediaType, - descriptor: &d.Descriptor, - } -} - -// fetcher implements methods for reading from a registry. 
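The `Descriptor` type shown above is the hub of this package: `Get` fetches the raw manifest without interpreting it, and `Image()`/`ImageIndex()` then coerce it into the requested shape, resolving an index to a child image when a platform is set. A minimal sketch of that flow, assuming a placeholder Windows image reference (chosen only for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Placeholder reference.
	ref, err := name.ParseReference("mcr.microsoft.com/windows/nanoserver:ltsc2022")
	if err != nil {
		log.Fatal(err)
	}

	// Get leaves the registry response mostly uninterpreted; MediaType tells
	// us whether we received an image manifest or an index.
	desc, err := remote.Get(ref, remote.WithPlatform(v1.Platform{OS: "windows", Architecture: "amd64"}))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("media type:", desc.MediaType)

	// Image resolves an index to the child image matching the platform above,
	// or returns the image directly if the reference was already an image.
	img, err := desc.Image()
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("image digest:", digest)
}
```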
-type fetcher struct { - Ref name.Reference - Client *http.Client - context context.Context -} - -func makeFetcher(ref name.Reference, o *options) (*fetcher, error) { - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)}) - if err != nil { - return nil, err - } - return &fetcher{ - Ref: ref, - Client: &http.Client{Transport: tr}, - context: o.context, - }, nil -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (f *fetcher) url(resource, identifier string) url.URL { - return url.URL{ - Scheme: f.Ref.Context().Registry.Scheme(), - Host: f.Ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), - } -} - -func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, nil, err - } - - manifest, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - digest, size, err := v1.SHA256(bytes.NewReader(manifest)) - if err != nil { - return nil, nil, err - } - - mediaType := types.MediaType(resp.Header.Get("Content-Type")) - contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) - if err == nil && mediaType == types.DockerManifestSchema1Signed { - // If we can parse the digest from the header, and it's a signed schema 1 - // manifest, let's use that for the digest to appease older registries. - digest = contentDigest - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) - } - } - // Do nothing for tags; I give up. - // - // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, - // but so many registries implement this incorrectly that it's not worth checking. - // - // For reference: - // https://github.com/GoogleContainerTools/kaniko/issues/298 - - // Return all this info since we have to calculate it anyway. 
- desc := v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - } - - return manifest, &desc, nil -} - -func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - mth := resp.Header.Get("Content-Type") - if mth == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) - } - mediaType := types.MediaType(mth) - - lh := resp.Header.Get("Content-Length") - if lh == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Content-Length header", u.String()) - } - size, err := strconv.ParseInt(lh, 10, 64) - if err != nil { - return nil, err - } - - dh := resp.Header.Get("Docker-Content-Digest") - if dh == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) - } - digest, err := v1.NewHash(dh) - if err != nil { - return nil, err - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) - } - } - - // Return all this info since we have to calculate it anyway. 
- return &v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - }, nil -} - -func (f *fetcher) fetchBlob(ctx context.Context, h v1.Hash) (io.ReadCloser, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.Client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - return verify.ReadCloser(resp.Body, h) -} - -func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - return resp, nil -} - -func (f *fetcher) blobExists(h v1.Hash) (bool, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go deleted file mode 100644 index 846ba07cda..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package remote provides facilities for reading/writing v1.Images from/to -// a remote image registry. -package remote diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go deleted file mode 100644 index 71739fee35..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
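The package doc above summarizes the intent: reading and writing `v1.Image`s against a remote registry. The blob helpers just shown (`fetchBlob`, `headBlob`, `blobExists`) back the lazy layer access that `remote.Image` exposes. A minimal read-only sketch, assuming a placeholder reference and default-keychain credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/my/repo:latest") // placeholder
	if err != nil {
		log.Fatal(err)
	}

	img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		log.Fatal(err)
	}

	// Layer contents are fetched lazily through the fetcher shown above;
	// digests and sizes typically come straight from the manifest, so this
	// loop does not download any blobs.
	layers, err := img.Layers()
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range layers {
		d, err := l.Digest()
		if err != nil {
			log.Fatal(err)
		}
		size, err := l.Size()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\t%d bytes\n", d, size)
	}
}
```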
- -package remote - -import ( - "io" - "io/ioutil" - "net/http" - "net/url" - "sync" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/verify" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var acceptableImageMediaTypes = []types.MediaType{ - types.DockerManifestSchema2, - types.OCIManifestSchema1, -} - -// remoteImage accesses an image from a remote registry -type remoteImage struct { - fetcher - manifestLock sync.Mutex // Protects manifest - manifest []byte - configLock sync.Mutex // Protects config - config []byte - mediaType types.MediaType - descriptor *v1.Descriptor -} - -var _ partial.CompressedImageCore = (*remoteImage)(nil) - -// Image provides access to a remote image reference. -func Image(ref name.Reference, options ...Option) (v1.Image, error) { - desc, err := Get(ref, options...) - if err != nil { - return nil, err - } - - return desc.Image() -} - -func (r *remoteImage) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestSchema2, nil -} - -func (r *remoteImage) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - // NOTE(jonjohnsonjr): We should never get here because the public entrypoints - // do type-checking via remote.Descriptor. I've left this here for tests that - // directly instantiate a remoteImage. - manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes) - if err != nil { - return nil, err - } - - if r.descriptor == nil { - r.descriptor = desc - } - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteImage) RawConfigFile() ([]byte, error) { - r.configLock.Lock() - defer r.configLock.Unlock() - if r.config != nil { - return r.config, nil - } - - m, err := partial.Manifest(r) - if err != nil { - return nil, err - } - - body, err := r.fetchBlob(r.context, m.Config.Digest) - if err != nil { - return nil, err - } - defer body.Close() - - r.config, err = ioutil.ReadAll(body) - if err != nil { - return nil, err - } - return r.config, nil -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. -func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { - // kind of a hack, but RawManifest does appropriate locking/memoization - // and makes sure r.descriptor is populated. - _, err := r.RawManifest() - return r.descriptor, err -} - -// remoteImageLayer implements partial.CompressedLayer -type remoteImageLayer struct { - ri *remoteImage - digest v1.Hash -} - -// Digest implements partial.CompressedLayer -func (rl *remoteImageLayer) Digest() (v1.Hash, error) { - return rl.digest, nil -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { - urls := []url.URL{rl.ri.url("blobs", rl.digest.String())} - - // Add alternative layer sources from URLs (usually none). - d, err := partial.BlobDescriptor(rl, rl.digest) - if err != nil { - return nil, err - } - - // We don't want to log binary layers -- this can break terminals. 
- ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs") - - for _, s := range d.URLs { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - urls = append(urls, *u) - } - - // The lastErr for most pulls will be the same (the first error), but for - // foreign layers we'll want to surface the last one, since we try to pull - // from the registry first, which would often fail. - // TODO: Maybe we don't want to try pulling from the registry first? - var lastErr error - for _, u := range urls { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := rl.ri.Client.Do(req.WithContext(ctx)) - if err != nil { - lastErr = err - continue - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - lastErr = err - continue - } - - return verify.ReadCloser(resp.Body, rl.digest) - } - - return nil, lastErr -} - -// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. -func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) { - return partial.Manifest(rl.ri) -} - -// MediaType implements v1.Layer -func (rl *remoteImageLayer) MediaType() (types.MediaType, error) { - bd, err := partial.BlobDescriptor(rl, rl.digest) - if err != nil { - return "", err - } - - return bd.MediaType, nil -} - -// Size implements partial.CompressedLayer -func (rl *remoteImageLayer) Size() (int64, error) { - // Look up the size of this digest in the manifest to avoid a request. - return partial.BlobSize(rl, rl.digest) -} - -// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. -func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) { - return partial.ConfigFile(rl.ri) -} - -// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have -// available in our ConfigFile. -func (rl *remoteImageLayer) DiffID() (v1.Hash, error) { - return partial.BlobToDiffID(rl, rl.digest) -} - -// Descriptor retains the original descriptor from an image manifest. -// See partial.Descriptor. -func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { - return partial.BlobDescriptor(rl, rl.digest) -} - -// See partial.Exists. -func (rl *remoteImageLayer) Exists() (bool, error) { - return rl.ri.blobExists(rl.digest) -} - -// LayerByDigest implements partial.CompressedLayer -func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { - return &remoteImageLayer{ - ri: r, - digest: h, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go deleted file mode 100644 index c139343527..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
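The index counterpart to the image code above is `remote.Index`, whose removal begins here: it fetches a manifest list and exposes the child descriptors so callers can pick a platform or dig into a specific digest. A short sketch of walking an index, assuming a placeholder multi-arch reference:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/my/repo:multiarch") // placeholder
	if err != nil {
		log.Fatal(err)
	}

	idx, err := remote.Index(ref)
	if err != nil {
		log.Fatal(err)
	}

	// IndexManifest lists the child descriptors; each can be resolved to an
	// image (or a nested index) by digest via idx.Image / idx.ImageIndex.
	im, err := idx.IndexManifest()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range im.Manifests {
		platform := "unknown"
		if m.Platform != nil {
			platform = m.Platform.OS + "/" + m.Platform.Architecture
		}
		fmt.Printf("%s\t%s\t%s\n", m.Digest, m.MediaType, platform)
	}
}
```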
- -package remote - -import ( - "bytes" - "fmt" - "sync" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var acceptableIndexMediaTypes = []types.MediaType{ - types.DockerManifestList, - types.OCIImageIndex, -} - -// remoteIndex accesses an index from a remote registry -type remoteIndex struct { - fetcher - manifestLock sync.Mutex // Protects manifest - manifest []byte - mediaType types.MediaType - descriptor *v1.Descriptor -} - -// Index provides access to a remote index reference. -func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) { - desc, err := get(ref, acceptableIndexMediaTypes, options...) - if err != nil { - return nil, err - } - - return desc.ImageIndex() -} - -func (r *remoteIndex) MediaType() (types.MediaType, error) { - if string(r.mediaType) != "" { - return r.mediaType, nil - } - return types.DockerManifestList, nil -} - -func (r *remoteIndex) Digest() (v1.Hash, error) { - return partial.Digest(r) -} - -func (r *remoteIndex) Size() (int64, error) { - return partial.Size(r) -} - -func (r *remoteIndex) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - // NOTE(jonjohnsonjr): We should never get here because the public entrypoints - // do type-checking via remote.Descriptor. I've left this here for tests that - // directly instantiate a remoteIndex. - manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes) - if err != nil { - return nil, err - } - - if r.descriptor == nil { - r.descriptor = desc - } - r.mediaType = desc.MediaType - r.manifest = manifest - return r.manifest, nil -} - -func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { - b, err := r.RawManifest() - if err != nil { - return nil, err - } - return v1.ParseIndexManifest(bytes.NewReader(b)) -} - -func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { - desc, err := r.childByHash(h) - if err != nil { - return nil, err - } - - // Descriptor.Image will handle coercing nested indexes into an Image. - return desc.Image() -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. -func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) { - // kind of a hack, but RawManifest does appropriate locking/memoization - // and makes sure r.descriptor is populated. - _, err := r.RawManifest() - return r.descriptor, err -} - -func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { - desc, err := r.childByHash(h) - if err != nil { - return nil, err - } - return desc.ImageIndex() -} - -// Workaround for #819. -func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - if h == childDesc.Digest { - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: r.fetcher, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: r.Ref.Context().Digest(h.String()), - }, nil - } - } - return nil, fmt.Errorf("layer not found: %s", h) -} - -func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { - desc, err := r.childByPlatform(platform) - if err != nil { - return nil, err - } - - // Descriptor.Image will handle coercing nested indexes into an Image. 
- return desc.Image() -} - -// This naively matches the first manifest with matching platform attributes. -// -// We should probably use this instead: -// github.com/containerd/containerd/platforms -// -// But first we'd need to migrate to: -// github.com/opencontainers/image-spec/specs-go/v1 -func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - // If platform is missing from child descriptor, assume it's amd64/linux. - p := defaultPlatform - if childDesc.Platform != nil { - p = *childDesc.Platform - } - - if matchesPlatform(p, platform) { - return r.childDescriptor(childDesc, platform) - } - } - return nil, fmt.Errorf("no child with platform %s/%s in index %s", platform.OS, platform.Architecture, r.Ref) -} - -func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { - index, err := r.IndexManifest() - if err != nil { - return nil, err - } - for _, childDesc := range index.Manifests { - if h == childDesc.Digest { - return r.childDescriptor(childDesc, defaultPlatform) - } - } - return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref) -} - -// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. -func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { - ref := r.Ref.Context().Digest(child.Digest.String()) - manifest, _, err := r.fetchManifest(ref, []types.MediaType{child.MediaType}) - if err != nil { - return nil, err - } - return &Descriptor{ - fetcher: fetcher{ - Ref: ref, - Client: r.Client, - context: r.context, - }, - Manifest: manifest, - Descriptor: child, - platform: platform, - }, nil -} - -// matchesPlatform checks if the given platform matches the required platforms. -// The given platform matches the required platform if -// - architecture and OS are identical. -// - OS version and variant are identical if provided. -// - features and OS features of the required platform are subsets of those of the given platform. -func matchesPlatform(given, required v1.Platform) bool { - // Required fields that must be identical. - if given.Architecture != required.Architecture || given.OS != required.OS { - return false - } - - // Optional fields that may be empty, but must be identical if provided. - if required.OSVersion != "" && given.OSVersion != required.OSVersion { - return false - } - if required.Variant != "" && given.Variant != required.Variant { - return false - } - - // Verify required platform's features are a subset of given platform's features. - if !isSubset(given.OSFeatures, required.OSFeatures) { - return false - } - if !isSubset(given.Features, required.Features) { - return false - } - - return true -} - -// isSubset checks if the required array of strings is a subset of the given lst. -func isSubset(lst, required []string) bool { - set := make(map[string]bool) - for _, value := range lst { - set[value] = true - } - - for _, value := range required { - if _, ok := set[value]; !ok { - return false - } - } - - return true -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go deleted file mode 100644 index 1501672d3a..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "io" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -// remoteImagelayer implements partial.CompressedLayer -type remoteLayer struct { - fetcher - digest v1.Hash -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { - // We don't want to log binary layers -- this can break terminals. - ctx := redact.NewContext(rl.context, "omitting binary blobs from logs") - return rl.fetchBlob(ctx, rl.digest) -} - -// Compressed implements partial.CompressedLayer -func (rl *remoteLayer) Size() (int64, error) { - resp, err := rl.headBlob(rl.digest) - if err != nil { - return -1, err - } - defer resp.Body.Close() - return resp.ContentLength, nil -} - -// Digest implements partial.CompressedLayer -func (rl *remoteLayer) Digest() (v1.Hash, error) { - return rl.digest, nil -} - -// MediaType implements v1.Layer -func (rl *remoteLayer) MediaType() (types.MediaType, error) { - return types.DockerLayer, nil -} - -// See partial.Exists. -func (rl *remoteLayer) Exists() (bool, error) { - return rl.blobExists(rl.digest) -} - -// Layer reads the given blob reference from a registry as a Layer. A blob -// reference here is just a punned name.Digest where the digest portion is the -// digest of the blob to be read and the repository portion is the repo where -// that blob lives. -func Layer(ref name.Digest, options ...Option) (v1.Layer, error) { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return nil, err - } - f, err := makeFetcher(ref, o) - if err != nil { - return nil, err - } - h, err := v1.NewHash(ref.Identifier()) - if err != nil { - return nil, err - } - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: *f, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: ref, - }, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go deleted file mode 100644 index e4a005aa58..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -type tags struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// List wraps ListWithContext using the background context. -func List(repo name.Repository, options ...Option) ([]string, error) { - return ListWithContext(context.Background(), repo, options...) -} - -// ListWithContext calls /tags/list for the given repository, returning the list of tags -// in the "tags" property. -func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) { - o, err := makeOptions(repo, options...) - if err != nil { - return nil, err - } - scopes := []string{repo.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) - if err != nil { - return nil, err - } - - uri := &url.URL{ - Scheme: repo.Registry.Scheme(), - Host: repo.Registry.RegistryStr(), - Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), - // ECR returns an error if n > 1000: - // https://github.com/google/go-containerregistry/issues/681 - RawQuery: "n=1000", - } - - // This is lazy, but I want to make sure List(..., WithContext(ctx)) works - // without calling makeOptions() twice (which can have side effects). - // This means ListWithContext(ctx, ..., WithContext(ctx2)) prefers ctx2. - if o.context != context.Background() { - ctx = o.context - } - - client := http.Client{Transport: tr} - tagList := []string{} - parsed := tags{} - - // get responses until there is no next page - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - req, err := http.NewRequest("GET", uri.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - - if err := resp.Body.Close(); err != nil { - return nil, err - } - - tagList = append(tagList, parsed.Tags...) - - uri, err = getNextPageURL(resp) - if err != nil { - return nil, err - } - // no next page - if uri == nil { - break - } - } - - return tagList, nil -} - -// getNextPageURL checks if there is a Link header in a http.Response which -// contains a link to the next page. If yes it returns the url.URL of the next -// page otherwise it returns nil. 
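The tag-listing code above pages through `/v2/<repo>/tags/list` by following `Link` headers, so callers only ever see one flat slice of tags. A minimal sketch of the caller side, assuming a placeholder repository and an illustrative timeout:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	repo, err := name.NewRepository("registry.example.com/my/repo") // placeholder
	if err != nil {
		log.Fatal(err)
	}

	// ListWithContext follows the paginated Link headers internally, so the
	// caller receives the complete tag list in one call.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	tags, err := remote.ListWithContext(ctx, repo)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tags {
		fmt.Println(t)
	}
}
```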
-func getNextPageURL(resp *http.Response) (*url.URL, error) { - link := resp.Header.Get("Link") - if link == "" { - return nil, nil - } - - if link[0] != '<' { - return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) - } - - end := strings.Index(link, ">") - if end == -1 { - return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) - } - link = link[1:end] - - linkURL, err := url.Parse(link) - if err != nil { - return nil, err - } - if resp.Request == nil || resp.Request.URL == nil { - return nil, nil - } - linkURL = resp.Request.URL.ResolveReference(linkURL) - return linkURL, nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go deleted file mode 100644 index 728997044c..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" -) - -// MountableLayer wraps a v1.Layer in a shim that enables the layer to be -// "mounted" when published to another registry. -type MountableLayer struct { - v1.Layer - - Reference name.Reference -} - -// Descriptor retains the original descriptor from an image manifest. -// See partial.Descriptor. -func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) { - return partial.Descriptor(ml.Layer) -} - -// Exists is a hack. See partial.Exists. -func (ml *MountableLayer) Exists() (bool, error) { - return partial.Exists(ml.Layer) -} - -// mountableImage wraps the v1.Layer references returned by the embedded v1.Image -// in MountableLayer's so that remote.Write might attempt to mount them from their -// source repository. -type mountableImage struct { - v1.Image - - Reference name.Reference -} - -// Layers implements v1.Image -func (mi *mountableImage) Layers() ([]v1.Layer, error) { - ls, err := mi.Image.Layers() - if err != nil { - return nil, err - } - mls := make([]v1.Layer, 0, len(ls)) - for _, l := range ls { - mls = append(mls, &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }) - } - return mls, nil -} - -// LayerByDigest implements v1.Image -func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDigest(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} - -// LayerByDiffID implements v1.Image -func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { - l, err := mi.Image.LayerByDiffID(d) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: mi.Reference, - }, nil -} - -// Descriptor retains the original descriptor from an index manifest. -// See partial.Descriptor. 
-func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { - return partial.Descriptor(mi.Image) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go deleted file mode 100644 index 45408fc4a1..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" -) - -// MultiWrite writes the given Images or ImageIndexes to the given refs, as -// efficiently as possible, by deduping shared layer blobs and uploading layers -// in parallel, then uploading all manifests in parallel. -// -// Current limitations: -// - All refs must share the same repository. -// - Images cannot consist of stream.Layers. -func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { - // Determine the repository being pushed to; if asked to push to - // multiple repositories, give up. - var repo, zero name.Repository - for ref := range m { - if repo == zero { - repo = ref.Context() - } else if ref.Context() != repo { - return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context()) - } - } - - o, err := makeOptions(repo, options...) - if err != nil { - return err - } - - // Collect unique blobs (layers and config blobs). - blobs := map[v1.Hash]v1.Layer{} - newManifests := []map[name.Reference]Taggable{} - // Separate originally requested images and indexes, so we can push images first. - images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{} - for ref, i := range m { - if img, ok := i.(v1.Image); ok { - images[ref] = i - if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil { - return err - } - continue - } - if idx, ok := i.(v1.ImageIndex); ok { - indexes[ref] = i - newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts) - if err != nil { - return err - } - continue - } - return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i) - } - - // Determine if any of the layers are Mountable, because if so we need - // to request Pull scope too. 
- ls := []v1.Layer{} - for _, l := range blobs { - ls = append(ls, l) - } - scopes := scopesForUploadingImage(repo, ls) - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: repo, - client: &http.Client{Transport: tr}, - context: o.context, - updates: o.updates, - lastUpdate: &v1.Update{}, - } - - // Collect the total size of blobs and manifests we're about to write. - if o.updates != nil { - defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() - for _, b := range blobs { - size, err := b.Size() - if err != nil { - return err - } - w.lastUpdate.Total += size - } - countManifest := func(t Taggable) error { - b, err := t.RawManifest() - if err != nil { - return err - } - w.lastUpdate.Total += int64(len(b)) - return nil - } - for _, i := range images { - if err := countManifest(i); err != nil { - return err - } - } - for _, nm := range newManifests { - for _, i := range nm { - if err := countManifest(i); err != nil { - return err - } - } - } - for _, i := range indexes { - if err := countManifest(i); err != nil { - return err - } - } - } - - // Upload individual blobs and collect any errors. - blobChan := make(chan v1.Layer, 2*o.jobs) - g, ctx := errgroup.WithContext(o.context) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming blobs to upload. - g.Go(func() error { - for b := range blobChan { - if err := w.uploadOne(b); err != nil { - return err - } - } - return nil - }) - } - g.Go(func() error { - defer close(blobChan) - for _, b := range blobs { - select { - case blobChan <- b: - case <-ctx.Done(): - return ctx.Err() - } - } - return nil - }) - if err := g.Wait(); err != nil { - return err - } - - commitMany := func(m map[name.Reference]Taggable) error { - // With all of the constituent elements uploaded, upload the manifests - // to commit the images and indexes, and collect any errors. - type task struct { - i Taggable - ref name.Reference - } - taskChan := make(chan task, 2*o.jobs) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming tasks to upload manifests. - g.Go(func() error { - for t := range taskChan { - if err := w.commitManifest(t.i, t.ref); err != nil { - return err - } - } - return nil - }) - } - go func() { - for ref, i := range m { - taskChan <- task{i, ref} - } - close(taskChan) - }() - return g.Wait() - } - // Push originally requested image manifests. These have no - // dependencies. - if err := commitMany(images); err != nil { - return err - } - // Push new manifests from lowest levels up. - for i := len(newManifests) - 1; i >= 0; i-- { - if err := commitMany(newManifests[i]); err != nil { - return err - } - } - // Push originally requested index manifests, which might depend on - // newly discovered manifests. - - return commitMany(indexes) -} - -// addIndexBlobs adds blobs to the set of blobs we intend to upload, and -// returns the latest copy of the ordered collection of manifests to upload. 
-func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) { - if lvl > len(newManifests)-1 { - newManifests = append(newManifests, map[name.Reference]Taggable{}) - } - - im, err := idx.IndexManifest() - if err != nil { - return nil, err - } - for _, desc := range im.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - idx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts) - if err != nil { - return nil, err - } - - // Also track the sub-index manifest to upload later by digest. - newManifests[lvl][repo.Digest(desc.Digest.String())] = idx - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := idx.Image(desc.Digest) - if err != nil { - return nil, err - } - if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil { - return nil, err - } - - // Also track the sub-image manifest to upload later by digest. - newManifests[lvl][repo.Digest(desc.Digest.String())] = img - default: - // Workaround for #819. - if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return nil, err - } - if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("unknown media type: %v", desc.MediaType) - } - } - } - return newManifests, nil -} - -func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { - // Ignore foreign layers. - mt, err := l.MediaType() - if err != nil { - return err - } - - if mt.IsDistributable() || allowNondistributableArtifacts { - d, err := l.Digest() - if err != nil { - return err - } - - blobs[d] = l - } - - return nil -} - -func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { - ls, err := img.Layers() - if err != nil { - return err - } - // Collect all layers. - for _, l := range ls { - if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil { - return err - } - } - - // Collect config blob. - cl, err := partial.ConfigLayer(img) - if err != nil { - return err - } - return addLayerBlob(cl, blobs, allowNondistributableArtifacts) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go deleted file mode 100644 index 7edebdf779..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
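`MultiWrite` above collects every unique blob across the requested images and indexes, uploads them once in parallel, and only then commits the manifests, children before parents. A sketch of tagging one pulled image under two names in the same repository, assuming placeholder references and write access via the default keychain:

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	src, err := name.ParseReference("registry.example.com/my/repo:v1") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	img, err := remote.Image(src, remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		log.Fatal(err)
	}

	// All destinations must live in the same repository; MultiWrite dedupes
	// the shared layer and config blobs and uploads each of them only once.
	dst := src.Context()
	manifests := map[name.Reference]remote.Taggable{
		dst.Tag("v1-copy"): img,
		dst.Tag("latest"):  img,
	}
	if err := remote.MultiWrite(manifests,
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
		remote.WithJobs(4)); err != nil {
		log.Fatal(err)
	}
}
```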
- -package remote - -import ( - "context" - "errors" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -// Option is a functional option for remote operations. -type Option func(*options) error - -type options struct { - auth authn.Authenticator - keychain authn.Keychain - transport http.RoundTripper - platform v1.Platform - context context.Context - jobs int - userAgent string - allowNondistributableArtifacts bool - updates chan<- v1.Update -} - -var defaultPlatform = v1.Platform{ - Architecture: "amd64", - OS: "linux", -} - -const defaultJobs = 4 - -func makeOptions(target authn.Resource, opts ...Option) (*options, error) { - o := &options{ - auth: authn.Anonymous, - transport: http.DefaultTransport, - platform: defaultPlatform, - context: context.Background(), - jobs: defaultJobs, - } - - for _, option := range opts { - if err := option(o); err != nil { - return nil, err - } - } - - if o.keychain != nil { - auth, err := o.keychain.Resolve(target) - if err != nil { - return nil, err - } - o.auth = auth - } - - // Wrap the transport in something that logs requests and responses. - // It's expensive to generate the dumps, so skip it if we're writing - // to nothing. - if logs.Enabled(logs.Debug) { - o.transport = transport.NewLogger(o.transport) - } - - // Wrap the transport in something that can retry network flakes. - o.transport = transport.NewRetry(o.transport) - - // Wrap this last to prevent transport.New from double-wrapping. - if o.userAgent != "" { - o.transport = transport.NewUserAgent(o.transport, o.userAgent) - } - - return o, nil -} - -// WithTransport is a functional option for overriding the default transport -// for remote operations. -// -// The default transport its http.DefaultTransport. -func WithTransport(t http.RoundTripper) Option { - return func(o *options) error { - o.transport = t - return nil - } -} - -// WithAuth is a functional option for overriding the default authenticator -// for remote operations. -// -// The default authenticator is authn.Anonymous. -func WithAuth(auth authn.Authenticator) Option { - return func(o *options) error { - o.auth = auth - return nil - } -} - -// WithAuthFromKeychain is a functional option for overriding the default -// authenticator for remote operations, using an authn.Keychain to find -// credentials. -// -// The default authenticator is authn.Anonymous. -func WithAuthFromKeychain(keys authn.Keychain) Option { - return func(o *options) error { - o.keychain = keys - return nil - } -} - -// WithPlatform is a functional option for overriding the default platform -// that Image and Descriptor.Image use for resolving an index to an image. -// -// The default platform is amd64/linux. -func WithPlatform(p v1.Platform) Option { - return func(o *options) error { - o.platform = p - return nil - } -} - -// WithContext is a functional option for setting the context in http requests -// performed by a given function. Note that this context is used for _all_ -// http requests, not just the initial volley. E.g., for remote.Image, the -// context will be set on http requests generated by subsequent calls to -// RawConfigFile() and even methods on layers returned by Layers(). -// -// The default context is context.Background(). 
-func WithContext(ctx context.Context) Option { - return func(o *options) error { - o.context = ctx - return nil - } -} - -// WithJobs is a functional option for setting the parallelism of remote -// operations performed by a given function. Note that not all remote -// operations support parallelism. -// -// The default value is 4. -func WithJobs(jobs int) Option { - return func(o *options) error { - if jobs <= 0 { - return errors.New("jobs must be greater than zero") - } - o.jobs = jobs - return nil - } -} - -// WithUserAgent adds the given string to the User-Agent header for any HTTP -// requests. This header will also include "go-containerregistry/${version}". -// -// If you want to completely overwrite the User-Agent header, use WithTransport. -func WithUserAgent(ua string) Option { - return func(o *options) error { - o.userAgent = ua - return nil - } -} - -// WithNondistributable includes non-distributable (foreign) layers -// when writing images, see: -// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers -// -// The default behaviour is to skip these layers -func WithNondistributable(o *options) error { - o.allowNondistributableArtifacts = true - return nil -} - -// WithProgress takes a channel that will receive progress updates as bytes are written. -// -// Sending updates to an unbuffered channel will block writes, so callers -// should provide a buffered channel to avoid potential deadlocks. -func WithProgress(updates chan<- v1.Update) Option { - return func(o *options) error { - o.updates = updates - return nil - } -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md deleted file mode 100644 index bd4d957b0e..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# `transport` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) - -The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. - -This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper) -that transparently performs: -* [Token -Authentication](https://docs.docker.com/registry/spec/auth/token/) and -* [OAuth2 -Authentication](https://docs.docker.com/registry/spec/auth/oauth/) - -for registry clients. - -## Raison d'être - -> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client? - -Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang). - -As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). 
Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large. - -![docker/distribution](../../../../images/docker.dot.svg) - -> Why does it matter if you depend on prometheus? Who cares? - -It's generally polite to your downstream to reduce the number of dependencies your package requires: - -* Downloading your package is faster, which helps our Australian friends and people on airplanes. -* There is less code to compile, which speeds up builds and saves the planet from global warming. -* You reduce the likelihood of inflicting dependency hell upon your consumers. -* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy. - -> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)? - -Similar reasons! That ends up pulling in grpc, protobuf, and logrus. - -![containerd/containerd](../../../../images/containerd.dot.svg) - -> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)? - -That just uses the the `docker/distribution` client... and more! - -![containers/image](../../../../images/containers.dot.svg) - -> Wow, what about this package? - -Of course, this package isn't perfect either. `transport` depends on `authn`, -which in turn depends on docker's config file parsing and handling package, -which you don't strictly need but almost certainly want if you're going to be -interacting with a registry. - -![google/go-containerregistry](../../../../images/ggcr.dot.svg) - -*These graphs were generated by -[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).* - -## Usage - -This is heavily used by the -[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) -package, which implements higher level image-centric functionality, but this -package is useful if you want to interact directly with the registry to do -something that `remote` doesn't support, e.g. [to handle with schema 1 -images](https://github.com/google/go-containerregistry/pull/509). - -This package also includes some [error -handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors) -facilities in the form of -[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError), -which will parse the response body into a structured error for unexpected http -status codes. - -Here's a "simple" program that writes the result of -[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags) -for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause) -to stdout. - -```go -package main - -import ( - "io" - "net/http" - "os" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" -) - -func main() { - repo, err := name.NewRepository("gcr.io/google-containers/pause") - if err != nil { - panic(err) - } - - // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. 
- auth, err := authn.DefaultKeychain.Resolve(repo.Registry) - if err != nil { - panic(err) - } - - // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. - scopes := []string{repo.Scope(transport.PullScope)} - t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) - if err != nil { - panic(err) - } - client := &http.Client{Transport: t} - - // Make the actual request. - resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") - if err != nil { - panic(err) - } - - // Assert that we get a 200, otherwise attempt to parse body as a structured error. - if err := transport.CheckError(resp, http.StatusOK); err != nil { - panic(err) - } - - // Write the response to stdout. - if _, err := io.Copy(os.Stdout, resp.Body); err != nil { - panic(err) - } -} -``` diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go deleted file mode 100644 index fdb362b762..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/base64" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" -) - -type basicTransport struct { - inner http.RoundTripper - auth authn.Authenticator - target string -} - -var _ http.RoundTripper = (*basicTransport)(nil) - -// RoundTrip implements http.RoundTripper -func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { - if bt.auth != authn.Anonymous { - auth, err := bt.auth.Authorization() - if err != nil { - return nil, err - } - - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the host with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. 
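The header values set by the branch below are plain `Authorization` headers; as a point of reference, here is a minimal, standard-library-only sketch of the Basic case (the registry URL and credentials are made up for illustration):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical credentials, for illustration only.
	user, pass := "alice", "s3cret"

	req, err := http.NewRequest(http.MethodGet, "https://registry.example.com/v2/", nil)
	if err != nil {
		panic(err)
	}

	// Basic auth is "Basic " + base64("user:pass"); this mirrors what the
	// Username/Password branch below sets on the request.
	delimited := fmt.Sprintf("%s:%s", user, pass)
	encoded := base64.StdEncoding.EncodeToString([]byte(delimited))
	req.Header.Set("Authorization", "Basic "+encoded)

	fmt.Println(req.Header.Get("Authorization"))
}
```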
- if in.Host == bt.target || in.URL.Host == bt.target { - if bearer := auth.RegistryToken; bearer != "" { - hdr := fmt.Sprintf("Bearer %s", bearer) - in.Header.Set("Authorization", hdr) - } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { - delimited := fmt.Sprintf("%s:%s", user, pass) - encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) - hdr := fmt.Sprintf("Basic %s", encoded) - in.Header.Set("Authorization", hdr) - } else if token := auth.Auth; token != "" { - hdr := fmt.Sprintf("Basic %s", token) - in.Header.Set("Authorization", hdr) - } - } - } - return bt.inner.RoundTrip(in) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go deleted file mode 100644 index 49941bd896..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - - authchallenge "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" -) - -type bearerTransport struct { - // Wrapped by bearerTransport. - inner http.RoundTripper - // Basic credentials that we exchange for bearer tokens. - basic authn.Authenticator - // Holds the bearer response from the token service. - bearer authn.AuthConfig - // Registry to which we send bearer tokens. - registry name.Registry - // See https://tools.ietf.org/html/rfc6750#section-3 - realm string - // See https://docs.docker.com/registry/spec/auth/token/ - service string - scopes []string - // Scheme we should use, determined by ping response. - scheme string -} - -var _ http.RoundTripper = (*bearerTransport)(nil) - -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -func stringSet(ss []string) map[string]struct{} { - set := make(map[string]struct{}) - for _, s := range ss { - set[s] = struct{}{} - } - return set -} - -// RoundTrip implements http.RoundTripper -func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { - sendRequest := func() (*http.Response, error) { - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the registry with which we are interacting. - // In case of redirect http.Client can use an empty Host, check URL too. 
- if matchesHost(bt.registry, in, bt.scheme) { - hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) - in.Header.Set("Authorization", hdr) - } - return bt.inner.RoundTrip(in) - } - - res, err := sendRequest() - if err != nil { - return nil, err - } - - // If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope. - if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 { - for _, wac := range challenges { - // TODO(jonjohnsonjr): Should we also update "realm" or "service"? - if scope, ok := wac.Parameters["scope"]; ok { - // From https://tools.ietf.org/html/rfc6750#section-3 - // The "scope" attribute is defined in Section 3.3 of [RFC6749]. The - // "scope" attribute is a space-delimited list of case-sensitive scope - // values indicating the required scope of the access token for - // accessing the requested resource. - scopes := strings.Split(scope, " ") - - // Add any scopes that we don't already request. - got := stringSet(bt.scopes) - for _, want := range scopes { - if _, ok := got[want]; !ok { - bt.scopes = append(bt.scopes, want) - } - } - } - } - - // TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge. - - // Retry the request to attempt to get a valid token. - if err = bt.refresh(in.Context()); err != nil { - return nil, err - } - return sendRequest() - } - - return res, err -} - -// It's unclear which authentication flow to use based purely on the protocol, -// so we rely on heuristics and fallbacks to support as many registries as possible. -// The basic token exchange is attempted first, falling back to the oauth flow. -// If the IdentityToken is set, this indicates that we should start with the oauth flow. -func (bt *bearerTransport) refresh(ctx context.Context) error { - auth, err := bt.basic.Authorization() - if err != nil { - return err - } - - if auth.RegistryToken != "" { - bt.bearer.RegistryToken = auth.RegistryToken - return nil - } - - var content []byte - if auth.IdentityToken != "" { - // If the secret being stored is an identity token, - // the Username should be set to , which indicates - // we are using an oauth flow. - content, err = bt.refreshOauth(ctx) - if terr, ok := err.(*Error); ok && terr.StatusCode == http.StatusNotFound { - // Note: Not all token servers implement oauth2. - // If the request to the endpoint returns 404 using the HTTP POST method, - // refer to Token Documentation for using the HTTP GET method supported by all token servers. - content, err = bt.refreshBasic(ctx) - } - } else { - content, err = bt.refreshBasic(ctx) - } - if err != nil { - return err - } - - // Some registries don't have "token" in the response. See #54. - type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - // TODO: handle expiry? - } - - var response tokenResponse - if err := json.Unmarshal(content, &response); err != nil { - return err - } - - // Some registries set access_token instead of token. - if response.AccessToken != "" { - response.Token = response.AccessToken - } - - // Find a token to turn into a Bearer authenticator - if response.Token != "" { - bt.bearer.RegistryToken = response.Token - } else { - return fmt.Errorf("no token in bearer response:\n%s", content) - } - - // If we obtained a refresh token from the oauth flow, use that for refresh() now. 
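For reference, the token-service payload handled above decodes with nothing more than `encoding/json`; a small standalone sketch with a made-up response body:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tokenResponse mirrors the fields the refresh logic above looks for.
type tokenResponse struct {
	Token        string `json:"token"`
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
}

func main() {
	// A made-up token service payload, for illustration only.
	body := []byte(`{"access_token":"abc123","refresh_token":"def456"}`)

	var resp tokenResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}

	// Some registries set access_token instead of token, so prefer it.
	if resp.AccessToken != "" {
		resp.Token = resp.AccessToken
	}
	fmt.Println("bearer token:", resp.Token, "refresh token:", resp.RefreshToken)
}
```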
- if response.RefreshToken != "" { - bt.basic = authn.FromConfig(authn.AuthConfig{ - IdentityToken: response.RefreshToken, - }) - } - - return nil -} - -func matchesHost(reg name.Registry, in *http.Request, scheme string) bool { - canonicalHeaderHost := canonicalAddress(in.Host, scheme) - canonicalURLHost := canonicalAddress(in.URL.Host, scheme) - canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme) - return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost -} - -func canonicalAddress(host, scheme string) (address string) { - // The host may be any one of: - // - hostname - // - hostname:port - // - ipv4 - // - ipv4:port - // - ipv6 - // - [ipv6]:port - // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt - // to call it when we know that the address contains a port - if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { - hostname, port, err := net.SplitHostPort(host) - if err != nil { - return host - } - if port == "" { - port = portMap[scheme] - } - - return net.JoinHostPort(hostname, port) - } - - return net.JoinHostPort(host, portMap[scheme]) -} - -// https://docs.docker.com/registry/spec/auth/oauth/ -func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { - auth, err := bt.basic.Authorization() - if err != nil { - return nil, err - } - - u, err := url.Parse(bt.realm) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("scope", strings.Join(bt.scopes, " ")) - v.Set("service", bt.service) - v.Set("client_id", defaultUserAgent) - if auth.IdentityToken != "" { - v.Set("grant_type", "refresh_token") - v.Set("refresh_token", auth.IdentityToken) - } else if auth.Username != "" && auth.Password != "" { - // TODO(#629): This is unreachable. - v.Set("grant_type", "password") - v.Set("username", auth.Username) - v.Set("password", auth.Password) - v.Set("access_type", "offline") - } - - client := http.Client{Transport: bt.inner} - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - // We don't want to log credentials. - ctx = redact.NewContext(ctx, "oauth token response contains credentials") - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) - return nil, err - } - - return ioutil.ReadAll(resp.Body) -} - -// https://docs.docker.com/registry/spec/auth/token/ -func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { - u, err := url.Parse(bt.realm) - if err != nil { - return nil, err - } - b := &basicTransport{ - inner: bt.inner, - auth: bt.basic, - target: u.Host, - } - client := http.Client{Transport: b} - - v := u.Query() - v["scope"] = bt.scopes - v.Set("service", bt.service) - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - // We don't want to log credentials. 
- ctx = redact.NewContext(ctx, "basic token response contains credentials") - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) - return nil, err - } - - return ioutil.ReadAll(resp.Body) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go deleted file mode 100644 index ff7025b5c0..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport provides facilities for setting up an authenticated -// http.RoundTripper given an Authenticator and base RoundTripper. See -// transport.New for more information. -package transport diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go deleted file mode 100644 index bb59d22e4d..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" -) - -// The set of query string keys that we expect to send as part of the registry -// protocol. Anything else is potentially dangerous to leak, as it's probably -// from a redirect. These redirects often included tokens or signed URLs. -var paramAllowlist = map[string]struct{}{ - // Token exchange - "scope": {}, - "service": {}, - // Cross-repo mounting - "mount": {}, - "from": {}, - // Layer PUT - "digest": {}, - // Listing tags and catalog - "n": {}, - "last": {}, -} - -// Error implements error to support the following error specification: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors -type Error struct { - Errors []Diagnostic `json:"errors,omitempty"` - // The http status code returned. - StatusCode int - // The raw body if we couldn't understand it. - rawBody string - // The request that failed. 
- request *http.Request -} - -// Check that Error implements error -var _ error = (*Error)(nil) - -// Error implements error -func (e *Error) Error() string { - prefix := "" - if e.request != nil { - prefix = fmt.Sprintf("%s %s: ", e.request.Method, redactURL(e.request.URL)) - } - return prefix + e.responseErr() -} - -func (e *Error) responseErr() string { - switch len(e.Errors) { - case 0: - if len(e.rawBody) == 0 { - if e.request != nil && e.request.Method == http.MethodHead { - return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) - } - return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) - } - return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) - case 1: - return e.Errors[0].String() - default: - var errors []string - for _, d := range e.Errors { - errors = append(errors, d.String()) - } - return fmt.Sprintf("multiple errors returned: %s", - strings.Join(errors, "; ")) - } -} - -// Temporary returns whether the request that preceded the error is temporary. -func (e *Error) Temporary() bool { - if len(e.Errors) == 0 { - _, ok := temporaryStatusCodes[e.StatusCode] - return ok - } - for _, d := range e.Errors { - if _, ok := temporaryErrorCodes[d.Code]; !ok { - return false - } - } - return true -} - -// TODO(jonjohnsonjr): Consider moving to internal/redact. -func redactURL(original *url.URL) *url.URL { - qs := original.Query() - for k, v := range qs { - for i := range v { - if _, ok := paramAllowlist[k]; !ok { - // key is not in the Allowlist - v[i] = "REDACTED" - } - } - } - redacted := *original - redacted.RawQuery = qs.Encode() - return &redacted -} - -// Diagnostic represents a single error returned by a Docker registry interaction. -type Diagnostic struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] -func (d Diagnostic) String() string { - msg := fmt.Sprintf("%s: %s", d.Code, d.Message) - if d.Detail != nil { - msg = fmt.Sprintf("%s; %v", msg, d.Detail) - } - return msg -} - -// ErrorCode is an enumeration of supported error codes. -type ErrorCode string - -// The set of error conditions a registry may return: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 -const ( - BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" - BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" - BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" - DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" - ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" - ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" - ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" - ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" - NameInvalidErrorCode ErrorCode = "NAME_INVALID" - NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" - SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" - TagInvalidErrorCode ErrorCode = "TAG_INVALID" - UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" - DeniedErrorCode ErrorCode = "DENIED" - UnsupportedErrorCode ErrorCode = "UNSUPPORTED" - TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" -) - -// TODO: Include other error types. 
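The `redactURL` helper above keeps only allowlisted query keys so that signed redirect URLs never reach the error message; a standalone sketch of the same idea using only the standard library (the URL and the small allowlist below are hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Allowlist of query keys that are safe to show (a subset of the one above).
	allow := map[string]struct{}{"scope": {}, "service": {}}

	// A hypothetical redirect URL carrying a signed token we must not log.
	u, err := url.Parse("https://blobs.example.com/layer?scope=pull&X-Amz-Signature=secret")
	if err != nil {
		panic(err)
	}

	qs := u.Query()
	for k, vs := range qs {
		if _, ok := allow[k]; ok {
			continue
		}
		// Replace every value of a non-allowlisted key.
		for i := range vs {
			vs[i] = "REDACTED"
		}
	}
	u.RawQuery = qs.Encode()

	// Prints the URL with the signature replaced by REDACTED.
	fmt.Println(u.String())
}
```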
-var temporaryErrorCodes = map[ErrorCode]struct{}{ - BlobUploadInvalidErrorCode: {}, - TooManyRequestsErrorCode: {}, -} - -var temporaryStatusCodes = map[int]struct{}{ - http.StatusRequestTimeout: {}, - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, -} - -// CheckError returns a structured error if the response status is not in codes. -func CheckError(resp *http.Response, codes ...int) error { - for _, code := range codes { - if resp.StatusCode == code { - // This is one of the supported status codes. - return nil - } - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors - structuredError := &Error{} - - // This can fail if e.g. the response body is not valid JSON. That's fine, - // we'll construct an appropriate error string from the body and status code. - _ = json.Unmarshal(b, structuredError) - - structuredError.rawBody = string(b) - structuredError.StatusCode = resp.StatusCode - structuredError.request = resp.Request - - return structuredError -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go deleted file mode 100644 index c341f844e6..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "net/http/httputil" - "time" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/pkg/logs" -) - -type logTransport struct { - inner http.RoundTripper -} - -// NewLogger returns a transport that logs requests and responses to -// github.com/google/go-containerregistry/pkg/logs.Debug. -func NewLogger(inner http.RoundTripper) http.RoundTripper { - return &logTransport{inner} -} - -func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { - // Inspired by: github.com/motemen/go-loghttp - - // We redact token responses and binary blobs in response/request. - omitBody, reason := redact.FromContext(in.Context()) - if omitBody { - logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason) - } else { - logs.Debug.Printf("--> %s %s", in.Method, in.URL) - } - - // Save these headers so we can redact Authorization. - savedHeaders := in.Header.Clone() - if in.Header != nil && in.Header.Get("authorization") != "" { - in.Header.Set("authorization", "") - } - - b, err := httputil.DumpRequestOut(in, !omitBody) - if err == nil { - logs.Debug.Println(string(b)) - } else { - logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err) - } - - // Restore the non-redacted headers. 
- in.Header = savedHeaders - - start := time.Now() - out, err = t.inner.RoundTrip(in) - duration := time.Since(start) - if err != nil { - logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration) - } - if out != nil { - msg := fmt.Sprintf("<-- %d", out.StatusCode) - if out.Request != nil { - msg = fmt.Sprintf("%s %s", msg, out.Request.URL) - } - msg = fmt.Sprintf("%s (%s)", msg, duration) - - if omitBody { - msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason) - } - - logs.Debug.Print(msg) - - b, err := httputil.DumpResponse(out, !omitBody) - if err == nil { - logs.Debug.Println(string(b)) - } else { - logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err) - } - } - return -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go deleted file mode 100644 index 396d4e0342..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - authchallenge "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/google/go-containerregistry/pkg/name" -) - -type challenge string - -const ( - anonymous challenge = "anonymous" - basic challenge = "basic" - bearer challenge = "bearer" -) - -type pingResp struct { - challenge challenge - - // Following the challenge there are often key/value pairs - // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" - parameters map[string]string - - // The registry's scheme to use. Communicates whether we fell back to http. - scheme string -} - -func (c challenge) Canonical() challenge { - return challenge(strings.ToLower(string(c))) -} - -func parseChallenge(suffix string) map[string]string { - kv := make(map[string]string) - for _, token := range strings.Split(suffix, ",") { - // Trim any whitespace around each token. - token = strings.Trim(token, " ") - - // Break the token into a key/value pair - if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { - // Unquote the value, if it is quoted. - kv[parts[0]] = strings.Trim(parts[1], `"`) - } else { - // If there was only one part, treat is as a key with an empty value - kv[token] = "" - } - } - return kv -} - -func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) { - client := http.Client{Transport: t} - - // This first attempts to use "https" for every request, falling back to http - // if the registry matches our localhost heuristic or if it is intentionally - // set to insecure via name.NewInsecureRegistry. 
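The `parseChallenge` helper above splits the parameter portion of a `WWW-Authenticate` header into key/value pairs; a standalone sketch of that parsing with a hypothetical Bearer challenge (realm, service, and scope values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Parameter portion of a typical Bearer challenge, e.g. what follows
	// "Bearer " in the WWW-Authenticate header.
	suffix := `realm="https://auth.example.com/token",service="registry.example.com",scope="repository:foo/bar:pull"`

	kv := map[string]string{}
	for _, token := range strings.Split(suffix, ",") {
		token = strings.TrimSpace(token)
		// Split each token into key and (optionally quoted) value.
		if parts := strings.SplitN(token, "=", 2); len(parts) == 2 {
			kv[parts[0]] = strings.Trim(parts[1], `"`)
		} else {
			kv[token] = ""
		}
	}
	fmt.Println(kv["realm"], kv["service"], kv["scope"])
}
```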
- schemes := []string{"https"} - if reg.Scheme() == "http" { - schemes = append(schemes, "http") - } - - var errs []string - for _, scheme := range schemes { - url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - errs = append(errs, err.Error()) - // Potentially retry with http. - continue - } - defer func() { - // By draining the body, make sure to reuse the connection made by - // the ping for the following access to the registry - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. - return &pingResp{ - challenge: anonymous, - scheme: scheme, - }, nil - case http.StatusUnauthorized: - if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { - // If we hit more than one, I'm not even sure what to do. - wac := challenges[0] - return &pingResp{ - challenge: challenge(wac.Scheme).Canonical(), - parameters: wac.Parameters, - scheme: scheme, - }, nil - } - // Otherwise, just return the challenge without parameters. - return &pingResp{ - challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), - scheme: scheme, - }, nil - default: - return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) - } - } - return nil, errors.New(strings.Join(errs, "; ")) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go deleted file mode 100644 index 7f7d1e452e..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - "time" - - "github.com/google/go-containerregistry/internal/retry" -) - -// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips. -var defaultBackoff = retry.Backoff{ - Duration: 100 * time.Millisecond, - Factor: 3.0, - Jitter: 0.1, - Steps: 5, -} - -var _ http.RoundTripper = (*retryTransport)(nil) - -// retryTransport wraps a RoundTripper and retries temporary network errors. -type retryTransport struct { - inner http.RoundTripper - backoff retry.Backoff - predicate retry.Predicate -} - -// Option is a functional option for retryTransport. -type Option func(*options) - -type options struct { - backoff retry.Backoff - predicate retry.Predicate -} - -// WithRetryBackoff sets the backoff for retry operations. -func WithRetryBackoff(backoff retry.Backoff) Option { - return func(o *options) { - o.backoff = backoff - } -} - -// WithRetryPredicate sets the predicate for retry operations. 
-func WithRetryPredicate(predicate func(error) bool) Option { - return func(o *options) { - o.predicate = predicate - } -} - -// NewRetry returns a transport that retries errors. -func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { - o := &options{ - backoff: defaultBackoff, - predicate: retry.IsTemporary, - } - - for _, opt := range opts { - opt(o) - } - - return &retryTransport{ - inner: inner, - backoff: o.backoff, - predicate: o.predicate, - } -} - -func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { - roundtrip := func() error { - out, err = t.inner.RoundTrip(in) - return err - } - retry.Retry(roundtrip, t.predicate, t.backoff) - return -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go deleted file mode 100644 index d70b6a850c..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - - "github.com/google/go-containerregistry/pkg/name" -) - -type schemeTransport struct { - // Scheme we should use, determined by ping response. - scheme string - - // Registry we're talking to. - registry name.Registry - - // Wrapped by schemeTransport. - inner http.RoundTripper -} - -// RoundTrip implements http.RoundTripper -func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { - // When we ping() the registry, we determine whether to use http or https - // based on which scheme was successful. That is only valid for the - // registry server and not e.g. a separate token server or blob storage, - // so we should only override the scheme if the host is the registry. - if matchesHost(st.registry, in, st.scheme) { - in.URL.Scheme = st.scheme - } - return st.inner.RoundTrip(in) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go deleted file mode 100644 index c3b56f7a41..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
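The retry transport defined in `retry.go` above wraps any `http.RoundTripper`; a minimal usage sketch relying only on the exported `NewRetry` and `WithRetryPredicate` shown there (the predicate and the endpoint being pinged are illustrative choices):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"syscall"

	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// Retry requests whose error the predicate below treats as transient;
	// the default backoff (roughly 0.1s, 0.3s, 0.9s, 2.7s) applies when no
	// backoff option is given.
	rt := transport.NewRetry(http.DefaultTransport,
		transport.WithRetryPredicate(func(err error) bool {
			return errors.Is(err, syscall.EPIPE)
		}),
	)

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://gcr.io/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```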
- -package transport - -// Scopes suitable to qualify each Repository -const ( - PullScope string = "pull" - PushScope string = "push,pull" - // For now DELETE is PUSH, which is the read/write ACL. - DeleteScope string = PushScope - CatalogScope string = "catalog" -) diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go deleted file mode 100644 index 5c35fc7c9b..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "fmt" - "net/http" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" -) - -// New returns a new RoundTripper based on the provided RoundTripper that has been -// setup to authenticate with the remote registry "reg", in the capacity -// laid out by the specified scopes. -// -// TODO(jonjohnsonjr): Deprecate this. -func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { - return NewWithContext(context.Background(), reg, auth, t, scopes) -} - -// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been -// setup to authenticate with the remote registry "reg", in the capacity -// laid out by the specified scopes. -func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { - // The handshake: - // 1. Use "t" to ping() the registry for the authentication challenge. - // - // 2a. If we get back a 200, then simply use "t". - // - // 2b. If we get back a 401 with a Basic challenge, then use a transport - // that just attachs auth each roundtrip. - // - // 2c. If we get back a 401 with a Bearer challenge, then use a transport - // that attaches a bearer token to each request, and refreshes is on 401s. - // Perform an initial refresh to seed the bearer token. - - // First we ping the registry to determine the parameters of the authentication handshake - // (if one is even necessary). - pr, err := ping(ctx, reg, t) - if err != nil { - return nil, err - } - - // Wrap t with a useragent transport unless we already have one. - if _, ok := t.(*userAgentTransport); !ok { - t = NewUserAgent(t, "") - } - - // Wrap t in a transport that selects the appropriate scheme based on the ping response. - t = &schemeTransport{ - scheme: pr.scheme, - registry: reg, - inner: t, - } - - switch pr.challenge.Canonical() { - case anonymous: - return t, nil - case basic: - return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil - case bearer: - // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. 
- realm, ok := pr.parameters["realm"] - if !ok { - return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) - } - service, ok := pr.parameters["service"] - if !ok { - // If the service parameter is not specified, then default it to the registry - // with which we are talking. - service = reg.String() - } - bt := &bearerTransport{ - inner: t, - basic: auth, - realm: realm, - registry: reg, - service: service, - scopes: scopes, - scheme: pr.scheme, - } - if err := bt.refresh(ctx); err != nil { - return nil, err - } - return bt, nil - default: - return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge) - } -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go deleted file mode 100644 index 74a9e71bdf..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "net/http" - "runtime/debug" -) - -var ( - // Version can be set via: - // -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'" - Version string - - ggcrVersion = defaultUserAgent -) - -const ( - defaultUserAgent = "go-containerregistry" - moduleName = "github.com/google/go-containerregistry" -) - -type userAgentTransport struct { - inner http.RoundTripper - ua string -} - -func init() { - if v := version(); v != "" { - ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v) - } -} - -func version() string { - if Version != "" { - // Version was set via ldflags, just return it. - return Version - } - - info, ok := debug.ReadBuildInfo() - if !ok { - return "" - } - - // Happens for crane and gcrane. - if info.Main.Path == moduleName { - return info.Main.Version - } - - // Anything else. - for _, dep := range info.Deps { - if dep.Path == moduleName { - return dep.Version - } - } - - return "" -} - -// NewUserAgent returns an http.Roundtripper that sets the user agent to -// The provided string plus additional go-containerregistry information, -// e.g. 
if provided "crane/v0.1.4" and this modules was built at v0.1.4: -// -// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4 -func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper { - if ua == "" { - ua = ggcrVersion - } else { - ua = fmt.Sprintf("%s %s", ua, ggcrVersion) - } - return &userAgentTransport{ - inner: inner, - ua: ua, - } -} - -// RoundTrip implements http.RoundTripper -func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) { - in.Header.Set("User-Agent", ut.ua) - return ut.inner.RoundTrip(in) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go deleted file mode 100644 index 05d99d076d..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync/atomic" - "syscall" - "time" - - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/retry" - "github.com/google/go-containerregistry/pkg/logs" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/stream" - "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" -) - -// Taggable is an interface that enables a manifest PUT (e.g. for tagging). -type Taggable interface { - RawManifest() ([]byte, error) -} - -// Write pushes the provided img to the specified image reference. -func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - - var lastUpdate *v1.Update - if o.updates != nil { - lastUpdate = &v1.Update{} - lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts) - if err != nil { - return err - } - defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() - } - return writeImage(ref, img, o, lastUpdate) -} - -func writeImage(ref name.Reference, img v1.Image, o *options, lastUpdate *v1.Update) error { - ls, err := img.Layers() - if err != nil { - return err - } - scopes := scopesForUploadingImage(ref.Context(), ls) - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: o.context, - updates: o.updates, - lastUpdate: lastUpdate, - } - - // Upload individual blobs and collect any errors. 
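The fan-out below is a standard `errgroup` worker-pool shape: N workers drain a channel while a producer feeds it and stops early if the group context is cancelled. A self-contained sketch of that pattern, with integers standing in for layers and a hard-coded job count in place of `WithJobs`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	jobs := 4 // stands in for the default remote.WithJobs value
	work := make(chan int, 2*jobs)

	g, ctx := errgroup.WithContext(context.Background())

	// Start N workers consuming items, as writeImage does with blobChan.
	for i := 0; i < jobs; i++ {
		g.Go(func() error {
			for item := range work {
				fmt.Println("processing", item)
			}
			return nil
		})
	}

	// Producer: feed work, bailing out if any worker already failed.
	g.Go(func() error {
		defer close(work)
		for i := 0; i < 10; i++ {
			select {
			case work <- i:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	if err := g.Wait(); err != nil {
		panic(err)
	}
}
```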
- blobChan := make(chan v1.Layer, 2*o.jobs) - g, ctx := errgroup.WithContext(o.context) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming blobs to upload. - g.Go(func() error { - for b := range blobChan { - if err := w.uploadOne(b); err != nil { - return err - } - } - return nil - }) - } - - // Upload individual layers in goroutines and collect any errors. - // If we can dedupe by the layer digest, try to do so. If we can't determine - // the digest for whatever reason, we can't dedupe and might re-upload. - g.Go(func() error { - defer close(blobChan) - uploaded := map[v1.Hash]bool{} - for _, l := range ls { - l := l - - // Handle foreign layers. - mt, err := l.MediaType() - if err != nil { - return err - } - if !mt.IsDistributable() && !o.allowNondistributableArtifacts { - continue - } - - // Streaming layers calculate their digests while uploading them. Assume - // an error here indicates we need to upload the layer. - h, err := l.Digest() - if err == nil { - // If we can determine the layer's digest ahead of - // time, use it to dedupe uploads. - if uploaded[h] { - continue // Already uploading. - } - uploaded[h] = true - } - select { - case blobChan <- l: - case <-ctx.Done(): - return ctx.Err() - } - } - return nil - }) - if err := g.Wait(); err != nil { - return err - } - - if l, err := partial.ConfigLayer(img); err != nil { - // We can't read the ConfigLayer, possibly because of streaming layers, - // since the layer DiffIDs haven't been calculated yet. Attempt to wait - // for the other layers to be uploaded, then try the config again. - if err := g.Wait(); err != nil { - return err - } - - // Now that all the layers are uploaded, try to upload the config file blob. - l, err := partial.ConfigLayer(img) - if err != nil { - return err - } - if err := w.uploadOne(l); err != nil { - return err - } - } else { - // We *can* read the ConfigLayer, so upload it concurrently with the layers. - g.Go(func() error { - return w.uploadOne(l) - }) - - // Wait for the layers + config. - if err := g.Wait(); err != nil { - return err - } - } - - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. - return w.commitManifest(img, ref) -} - -// writer writes the elements of an image to a remote image reference. -type writer struct { - repo name.Repository - client *http.Client - context context.Context - - updates chan<- v1.Update - lastUpdate *v1.Update -} - -func sendError(ch chan<- v1.Update, err error) error { - if err != nil && ch != nil { - ch <- v1.Update{Error: err} - } - return err -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (w *writer) url(path string) url.URL { - return url.URL{ - Scheme: w.repo.Registry.Scheme(), - Host: w.repo.RegistryStr(), - Path: path, - } -} - -// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. -func (w *writer) nextLocation(resp *http.Response) (string, error) { - loc := resp.Header.Get("Location") - if len(loc) == 0 { - return "", errors.New("missing Location header") - } - u, err := url.Parse(loc) - if err != nil { - return "", err - } - - // If the location header returned is just a url path, then fully qualify it. - // We cannot simply call w.url, since there might be an embedded query string. - return resp.Request.URL.ResolveReference(u).String(), nil -} - -// checkExistingBlob checks if a blob exists already in the repository by making a -// HEAD request to the blob store API. 
GCR performs an existence check on the -// initiation if "mount" is specified, even if no "from" sources are specified. -// However, this is not broadly applicable to all registries, e.g. ECR. -func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) - - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - - resp, err := w.client.Do(req.WithContext(w.context)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// checkExistingManifest checks if a manifest exists already in the repository -// by making a HEAD request to the manifest API. -func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) - - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - req.Header.Set("Accept", string(mt)) - - resp, err := w.client.Do(req.WithContext(w.context)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// initiateUpload initiates the blob upload, which starts with a POST that can -// optionally include the hash of the layer and a list of repositories from -// which that layer might be read. On failure, an error is returned. -// On success, the layer was either mounted (nothing more to do) or a blob -// upload was initiated and the body of that blob should be sent to the returned -// location. -func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { - u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) - uv := url.Values{} - if mount != "" && from != "" { - // Quay will fail if we specify a "mount" without a "from". - uv["mount"] = []string{mount} - uv["from"] = []string{from} - } - u.RawQuery = uv.Encode() - - // Make the request to initiate the blob upload. - req, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return "", false, err - } - req.Header.Set("Content-Type", "application/json") - resp, err := w.client.Do(req.WithContext(w.context)) - if err != nil { - return "", false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { - return "", false, err - } - - // Check the response code to determine the result. - switch resp.StatusCode { - case http.StatusCreated: - // We're done, we were able to fast-path. - return "", true, nil - case http.StatusAccepted: - // Proceed to PATCH, upload has begun. - loc, err := w.nextLocation(resp) - return loc, false, err - default: - panic("Unreachable: initiateUpload") - } -} - -type progressReader struct { - rc io.ReadCloser - - count *int64 // number of bytes this reader has read, to support resetting on retry. - updates chan<- v1.Update - lastUpdate *v1.Update -} - -func (r *progressReader) Read(b []byte) (int, error) { - n, err := r.rc.Read(b) - if err != nil { - return n, err - } - atomic.AddInt64(r.count, int64(n)) - // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is cancelled. 
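Because `progressReader` forwards every chunk it reads to the updates channel, callers that combine `remote.Write` with `WithProgress` should pass a buffered channel, as the option's documentation notes. A usage sketch under that assumption (the image and reference wiring is left out, and the buffer size is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// push writes img to ref while printing progress; illustrative only.
func push(ref name.Reference, img v1.Image) error {
	// Buffered so sends from progressReader don't block the upload goroutines.
	updates := make(chan v1.Update, 100)
	done := make(chan error, 1)

	go func() {
		done <- remote.Write(ref, img, remote.WithProgress(updates))
	}()

	// remote.Write closes the channel when it returns, ending this loop.
	for u := range updates {
		if u.Error != nil {
			fmt.Println("push error:", u.Error)
			continue
		}
		fmt.Printf("pushed %d of %d bytes\n", u.Complete, u.Total)
	}
	return <-done
}

func main() {}
```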
- r.updates <- v1.Update{ - Total: r.lastUpdate.Total, - Complete: atomic.AddInt64(&r.lastUpdate.Complete, int64(n)), - } - return n, nil -} - -func (r *progressReader) Close() error { return r.rc.Close() } - -// streamBlob streams the contents of the blob to the specified location. -// On failure, this will return an error. On success, this will return the location -// header indicating how to commit the streamed blob. -func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocation string) (commitLocation string, rerr error) { - reset := func() {} - defer func() { - if rerr != nil { - reset() - } - }() - if w.updates != nil { - var count int64 - blob = &progressReader{rc: blob, updates: w.updates, lastUpdate: w.lastUpdate, count: &count} - reset = func() { - atomic.AddInt64(&w.lastUpdate.Complete, -count) - w.updates <- *w.lastUpdate - } - } - - req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) - if err != nil { - return "", err - } - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { - return "", err - } - - // The blob has been uploaded, return the location header indicating - // how to commit this layer. - return w.nextLocation(resp) -} - -// commitBlob commits this blob by sending a PUT to the location returned from -// streaming the blob. -func (w *writer) commitBlob(location, digest string) error { - u, err := url.Parse(location) - if err != nil { - return err - } - v := u.Query() - v.Set("digest", digest) - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPut, u.String(), nil) - if err != nil { - return err - } - - resp, err := w.client.Do(req.WithContext(w.context)) - if err != nil { - return err - } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusCreated) -} - -// incrProgress increments and sends a progress update, if WithProgress is used. -func (w *writer) incrProgress(written int64) { - if w.updates == nil { - return - } - w.updates <- v1.Update{ - Total: w.lastUpdate.Total, - Complete: atomic.AddInt64(&w.lastUpdate.Complete, int64(written)), - } -} - -// uploadOne performs a complete upload of a single layer. -func (w *writer) uploadOne(l v1.Layer) error { - var from, mount string - if h, err := l.Digest(); err == nil { - // If we know the digest, this isn't a streaming layer. Do an existence - // check so we can skip uploading the layer if possible. - existing, err := w.checkExistingBlob(h) - if err != nil { - return err - } - if existing { - size, err := l.Size() - if err != nil { - return err - } - w.incrProgress(size) - logs.Progress.Printf("existing blob: %v", h) - return nil - } - - mount = h.String() - } - if ml, ok := l.(*MountableLayer); ok { - if w.repo.RegistryStr() == ml.Reference.Context().RegistryStr() { - from = ml.Reference.Context().RepositoryStr() - } - } - - ctx := w.context - - shouldRetry := func(err error) bool { - // Various failure modes here, as we're often reading from and writing to - // the network. 
- if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.EPIPE) { - logs.Warn.Printf("retrying %v", err) - return true - } - return false - } - - tryUpload := func() error { - location, mounted, err := w.initiateUpload(from, mount) - if err != nil { - return err - } else if mounted { - size, err := l.Size() - if err != nil { - return err - } - w.incrProgress(size) - h, err := l.Digest() - if err != nil { - return err - } - logs.Progress.Printf("mounted blob: %s", h.String()) - return nil - } - - // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular. - // TODO(opencontainers/image-spec#791): Would be great to have an actual parser. - mt, err := l.MediaType() - if err != nil { - return err - } - smt := string(mt) - if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) { - ctx = redact.NewContext(ctx, "omitting binary blobs from logs") - } - - blob, err := l.Compressed() - if err != nil { - return err - } - location, err = w.streamBlob(ctx, blob, location) - if err != nil { - return err - } - - h, err := l.Digest() - if err != nil { - return err - } - digest := h.String() - - if err := w.commitBlob(location, digest); err != nil { - return err - } - logs.Progress.Printf("pushed blob: %s", digest) - return nil - } - - // Try this three times, waiting 1s after first failure, 3s after second. - backoff := retry.Backoff{ - Duration: 1.0 * time.Second, - Factor: 3.0, - Jitter: 0.1, - Steps: 3, - } - - return retry.Retry(tryUpload, shouldRetry, backoff) -} - -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -func (w *writer) writeIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) error { - index, err := ii.IndexManifest() - if err != nil { - return err - } - - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - - // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. - for _, desc := range index.Manifests { - ref := ref.Context().Digest(desc.Digest.String()) - exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) - if err != nil { - return err - } - if exists { - logs.Progress.Print("existing manifest: ", desc.Digest) - continue - } - - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - ii, err := ii.ImageIndex(desc.Digest) - if err != nil { - return err - } - if err := w.writeIndex(ref, ii); err != nil { - return err - } - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := ii.Image(desc.Digest) - if err != nil { - return err - } - if err := writeImage(ref, img, o, w.lastUpdate); err != nil { - return err - } - default: - // Workaround for #819. - if wl, ok := ii.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return err - } - if err := w.uploadOne(layer); err != nil { - return err - } - } - } - } - - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. - return w.commitManifest(ii, ref) -} - -type withMediaType interface { - MediaType() (types.MediaType, error) -} - -// This is really silly, but go interfaces don't let me satisfy remote.Taggable -// with remote.Descriptor because of name collisions between method names and -// struct fields. -// -// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or -// create a descriptor based on the RawManifest and (optionally) MediaType. 
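The fallback path in `unpackTaggable` below simply hashes the raw manifest bytes and pairs them with a default media type; the same derivation in isolation, using the exported helpers shown in this file (the manifest body is made up):

```go
package main

import (
	"bytes"
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	// A made-up raw manifest body; real callers would use Taggable.RawManifest().
	raw := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json"}`)

	// Hash and size of the raw bytes, plus a default media type when the
	// Taggable doesn't report one, just like the fallback below.
	h, sz, err := v1.SHA256(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}

	desc := v1.Descriptor{
		MediaType: types.DockerManifestSchema2,
		Size:      sz,
		Digest:    h,
	}
	fmt.Printf("%s %d %s\n", desc.MediaType, desc.Size, desc.Digest.String())
}
```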
-func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { - if d, ok := t.(*Descriptor); ok { - return d.Manifest, &d.Descriptor, nil - } - b, err := t.RawManifest() - if err != nil { - return nil, nil, err - } - - // A reasonable default if Taggable doesn't implement MediaType. - mt := types.DockerManifestSchema2 - - if wmt, ok := t.(withMediaType); ok { - m, err := wmt.MediaType() - if err != nil { - return nil, nil, err - } - mt = m - } - - h, sz, err := v1.SHA256(bytes.NewReader(b)) - if err != nil { - return nil, nil, err - } - - return b, &v1.Descriptor{ - MediaType: mt, - Size: sz, - Digest: h, - }, nil -} - -// commitManifest does a PUT of the image's manifest. -func (w *writer) commitManifest(t Taggable, ref name.Reference) error { - raw, desc, err := unpackTaggable(t) - if err != nil { - return err - } - - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier())) - - // Make the request to PUT the serialized manifest - req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) - if err != nil { - return err - } - req.Header.Set("Content-Type", string(desc.MediaType)) - - resp, err := w.client.Do(req.WithContext(w.context)) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { - return err - } - - // The image was successfully pushed! - logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size) - w.incrProgress(int64(len(raw))) - return nil -} - -func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { - // use a map as set to remove duplicates scope strings - scopeSet := map[string]struct{}{} - - for _, l := range layers { - if ml, ok := l.(*MountableLayer); ok { - // we will add push scope for ref.Context() after the loop. - // for now we ask pull scope for references of the same registry - if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { - scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} - } - } - } - - scopes := make([]string, 0) - // Push scope should be the first element because a few registries just look at the first scope to determine access. - scopes = append(scopes, repo.Scope(transport.PushScope)) - - for scope := range scopeSet { - scopes = append(scopes, scope) - } - - return scopes -} - -// WriteIndex pushes the provided ImageIndex to the specified image reference. -// WriteIndex will attempt to push all of the referenced manifests before -// attempting to push the ImageIndex, to retain referential integrity. -func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - - scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: o.context, - updates: o.updates, - } - - if o.updates != nil { - w.lastUpdate = &v1.Update{} - w.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts) - if err != nil { - return err - } - defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() - } - - return w.writeIndex(ref, ii, options...) 
-} - -// countImage counts the total size of all layers + config blob + manifest for -// an image. It de-dupes duplicate layers. -func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) { - var total int64 - ls, err := img.Layers() - if err != nil { - return 0, err - } - seen := map[v1.Hash]bool{} - for _, l := range ls { - // Handle foreign layers. - mt, err := l.MediaType() - if err != nil { - return 0, err - } - if !mt.IsDistributable() && !allowNondistributableArtifacts { - continue - } - - // TODO: support streaming layers which update the total count as they write. - if _, ok := l.(*stream.Layer); ok { - return 0, errors.New("cannot use stream.Layer and WithProgress") - } - - // Dedupe layers. - d, err := l.Digest() - if err != nil { - return 0, err - } - if seen[d] { - continue - } - seen[d] = true - - size, err := l.Size() - if err != nil { - return 0, err - } - total += size - } - b, err := img.RawConfigFile() - if err != nil { - return 0, err - } - total += int64(len(b)) - size, err := img.Size() - if err != nil { - return 0, err - } - total += size - return total, nil -} - -// countIndex counts the total size of all images + sub-indexes for an index. -// It does not attempt to de-dupe duplicate images, etc. -func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) { - var total int64 - mf, err := idx.IndexManifest() - if err != nil { - return 0, err - } - - for _, desc := range mf.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - sidx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return 0, err - } - size, err := countIndex(sidx, allowNondistributableArtifacts) - if err != nil { - return 0, err - } - total += size - case types.OCIManifestSchema1, types.DockerManifestSchema2: - simg, err := idx.Image(desc.Digest) - if err != nil { - return 0, err - } - size, err := countImage(simg, allowNondistributableArtifacts) - if err != nil { - return 0, err - } - total += size - default: - // Workaround for #819. - if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return 0, err - } - size, err := layer.Size() - if err != nil { - return 0, err - } - total += size - } - } - } - - size, err := idx.Size() - if err != nil { - return 0, err - } - total += size - return total, nil -} - -// WriteLayer uploads the provided Layer to the specified repo. -func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) { - o, err := makeOptions(repo, options...) - if err != nil { - return err - } - scopes := scopesForUploadingImage(repo, []v1.Layer{layer}) - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: repo, - client: &http.Client{Transport: tr}, - context: o.context, - updates: o.updates, - } - - if o.updates != nil { - defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() - - // TODO: support streaming layers which update the total count as they write. - if _, ok := layer.(*stream.Layer); ok { - return errors.New("cannot use stream.Layer and WithProgress") - } - size, err := layer.Size() - if err != nil { - return err - } - w.lastUpdate = &v1.Update{Total: size} - } - return w.uploadOne(layer) -} - -// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/ -// -// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and -// remote.Descriptor. 
-// -// If t implements MediaType, we will use that for the Content-Type, otherwise -// we will default to types.DockerManifestSchema2. -// -// Tag does not attempt to write anything other than the manifest, so callers -// should ensure that all blobs or manifests that are referenced by t exist -// in the target registry. -func Tag(tag name.Tag, t Taggable, options ...Option) error { - return Put(tag, t, options...) -} - -// Put adds a manifest from the given Taggable via PUT /v1/.../manifest/ -// -// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and -// remote.Descriptor. -// -// If t implements MediaType, we will use that for the Content-Type, otherwise -// we will default to types.DockerManifestSchema2. -// -// Put does not attempt to write anything other than the manifest, so callers -// should ensure that all blobs or manifests that are referenced by t exist -// in the target registry. -func Put(ref name.Reference, t Taggable, options ...Option) error { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - scopes := []string{ref.Scope(transport.PushScope)} - - // TODO: This *always* does a token exchange. For some registries, - // that's pretty slow. Some ideas; - // * Tag could take a list of tags. - // * Allow callers to pass in a transport.Transport, typecheck - // it to allow them to reuse the transport across multiple calls. - // * WithTag option to do multiple manifest PUTs in commitManifest. - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: o.context, - } - - return w.commitManifest(t, ref) -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md deleted file mode 100644 index da0dda48d9..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# `stream` - -[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream) - -The `stream` package contains an implementation of -[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer) -that supports _streaming_ access, i.e. the layer contents are read once and not -buffered. - -## Usage - -```go -package main - -import ( - "os" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/google/go-containerregistry/pkg/v1/stream" -) - -// upload the contents of stdin as a layer to a local registry -func main() { - repo, err := name.NewRepository("localhost:5000/stream") - if err != nil { - panic(err) - } - - layer := stream.NewLayer(os.Stdin) - - if err := remote.WriteLayer(repo, layer); err != nil { - panic(err) - } -} -``` - -## Structure - -This implements the layer portion of an [image -upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that -is responsible for hashing the uncompressed contents to compute the `DiffID`, -gzipping them to produce the `Compressed` contents, and hashing/counting the -bytes to produce the `Digest`/`Size`. This goroutine writes to an -`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from -the corresponding `io.PipeReader`. - -

- -
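For illustration only — this is not the package's own code, and the helper name `streamCompress` is made up — a minimal sketch of the pipe-and-goroutine pattern described above (hash the uncompressed bytes while gzipping them into an `io.Pipe`, so nothing is buffered in full) might look like this:

```go
package main

import (
	"compress/gzip"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// streamCompress returns a reader over the gzipped contents of r.
// A goroutine hashes the uncompressed bytes (analogous to a DiffID)
// while feeding them into the gzip writer; the pipe blocks until the
// caller reads, so the contents are streamed rather than buffered.
func streamCompress(r io.Reader) (io.ReadCloser, <-chan string) {
	diffID := make(chan string, 1)
	pr, pw := io.Pipe()
	go func() {
		h := sha256.New()
		zw := gzip.NewWriter(pw)
		// Tee the uncompressed bytes into both the hasher and the gzip writer.
		if _, err := io.Copy(io.MultiWriter(h, zw), r); err != nil {
			pw.CloseWithError(err)
			return
		}
		diffID <- hex.EncodeToString(h.Sum(nil))
		// Flush the gzip footer, then signal EOF (or the close error) to the reader.
		pw.CloseWithError(zw.Close())
	}()
	return pr, diffID
}

func main() {
	rc, diffID := streamCompress(strings.NewReader("hello, streaming layer\n"))
	defer rc.Close()
	// Consume the compressed stream exactly once; here we simply discard it.
	n, err := io.Copy(io.Discard, rc)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("compressed %d bytes, uncompressed sha256=%s\n", n, <-diffID)
}
```

As with `stream.Layer`, the digest is only available after the compressed stream has been fully consumed, which is also why the caveats below apply.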

- -## Caveats - -This assumes that you have an uncompressed layer (i.e. a tarball) and would like -to compress it. Calling `Uncompressed` is always an error. Likewise, other -methods are invalid until the contents of `Compressed` have been completely -consumed and `Close`d. - -Using a `stream.Layer` will likely not work without careful consideration. For -example, in the `mutate` package, we defer computing the manifest and config -file until they are actually called. This allows you to `mutate.Append` a -streaming layer to an image without accidentally consuming it. Similarly, in -`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the -layer anyway, understanding that we may be dealing with a `stream.Layer` whose -contents need to be uploaded before we can upload the config file. - -Given the [structure](#structure) of how this is implemented, forgetting to -`Close` a `stream.Layer` will leak a goroutine. diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go deleted file mode 100644 index e91f57ab3a..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stream - -import ( - "bufio" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "errors" - "hash" - "io" - "os" - "sync" - - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/types" -) - -var ( - // ErrNotComputed is returned when the requested value is not yet - // computed because the stream has not been consumed yet. - ErrNotComputed = errors.New("value not computed until stream is consumed") - - // ErrConsumed is returned by Compressed when the underlying stream has - // already been consumed and closed. - ErrConsumed = errors.New("stream was already consumed") -) - -// Layer is a streaming implementation of v1.Layer. -type Layer struct { - blob io.ReadCloser - consumed bool - compression int - - mu sync.Mutex - digest, diffID *v1.Hash - size int64 -} - -var _ v1.Layer = (*Layer)(nil) - -// LayerOption applies options to layer -type LayerOption func(*Layer) - -// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values. -func WithCompressionLevel(level int) LayerOption { - return func(l *Layer) { - l.compression = level - } -} - -// NewLayer creates a Layer from an io.ReadCloser. -func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer { - layer := &Layer{ - blob: rc, - compression: gzip.BestSpeed, - } - - for _, opt := range opts { - opt(layer) - } - - return layer -} - -// Digest implements v1.Layer. -func (l *Layer) Digest() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.digest == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.digest, nil -} - -// DiffID implements v1.Layer. 
-func (l *Layer) DiffID() (v1.Hash, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.diffID == nil { - return v1.Hash{}, ErrNotComputed - } - return *l.diffID, nil -} - -// Size implements v1.Layer. -func (l *Layer) Size() (int64, error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.size == 0 { - return 0, ErrNotComputed - } - return l.size, nil -} - -// MediaType implements v1.Layer -func (l *Layer) MediaType() (types.MediaType, error) { - // We return DockerLayer for now as uncompressed layers - // are unimplemented - return types.DockerLayer, nil -} - -// Uncompressed implements v1.Layer. -func (l *Layer) Uncompressed() (io.ReadCloser, error) { - return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") -} - -// Compressed implements v1.Layer. -func (l *Layer) Compressed() (io.ReadCloser, error) { - if l.consumed { - return nil, ErrConsumed - } - return newCompressedReader(l) -} - -type compressedReader struct { - closer io.Closer // original blob's Closer. - - h, zh hash.Hash // collects digests of compressed and uncompressed stream. - pr io.Reader - bw *bufio.Writer - count *countWriter - - l *Layer // stream.Layer to update upon Close. -} - -func newCompressedReader(l *Layer) (*compressedReader, error) { - h := sha256.New() - zh := sha256.New() - count := &countWriter{} - - // gzip.Writer writes to the output stream via pipe, a hasher to - // capture compressed digest, and a countWriter to capture compressed - // size. - pr, pw := io.Pipe() - - // Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count. - mw := io.MultiWriter(pw, zh, count) - - // Buffer the output of the gzip writer so we don't have to wait on pr to keep writing. - // 64K ought to be small enough for anybody. - bw := bufio.NewWriterSize(mw, 2<<16) - zw, err := gzip.NewWriterLevel(bw, l.compression) - if err != nil { - return nil, err - } - - cr := &compressedReader{ - closer: newMultiCloser(zw, l.blob), - pr: pr, - bw: bw, - h: h, - zh: zh, - count: count, - l: l, - } - go func() { - if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { - pw.CloseWithError(err) - return - } - // Now close the compressed reader, to flush the gzip stream - // and calculate digest/diffID/size. This will cause pr to - // return EOF which will cause readers of the Compressed stream - // to finish reading. - pw.CloseWithError(cr.Close()) - }() - - return cr, nil -} - -func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } - -func (cr *compressedReader) Close() error { - cr.l.mu.Lock() - defer cr.l.mu.Unlock() - - // Close the inner ReadCloser. - if err := cr.closer.Close(); err != nil { - return err - } - - // Flush the buffer. - if err := cr.bw.Flush(); err != nil { - return err - } - - diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) - if err != nil { - return err - } - cr.l.diffID = &diffID - - digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) - if err != nil { - return err - } - cr.l.digest = &digest - - cr.l.size = cr.count.n - cr.l.consumed = true - return nil -} - -// countWriter counts bytes written to it. -type countWriter struct{ n int64 } - -func (c *countWriter) Write(p []byte) (int, error) { - c.n += int64(len(p)) - return len(p), nil -} - -// multiCloser is a Closer that collects multiple Closers and Closes them in order. 
-type multiCloser []io.Closer - -var _ io.Closer = (multiCloser)(nil) - -func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) } - -func (m multiCloser) Close() error { - for _, c := range m { - // NOTE: net/http will call close on success, so if we've already - // closed the inner rc, it's not an error. - if err := c.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - return err - } - } - return nil -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go deleted file mode 100644 index 21f2236502..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// MediaType is an enumeration of the supported mime types that an element of an image might have. -type MediaType string - -// The collection of known MediaType values. -const ( - OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" - OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" - OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" - OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" - OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" - OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" - OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" - DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" - DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" - DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" - DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" - - OCIVendorPrefix = "vnd.oci" - DockerVendorPrefix = "vnd.docker" -) - -// IsDistributable returns true if a layer is distributable, see: -// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers -func (m MediaType) IsDistributable() bool { - switch m { - case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: - return false - } - return true -} - -// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. 
-func (m MediaType) IsImage() bool { - switch m { - case OCIManifestSchema1, DockerManifestSchema2: - return true - } - return false -} - -// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. -func (m MediaType) IsIndex() bool { - switch m { - case OCIImageIndex, DockerManifestList: - return true - } - return false -} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/test/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go deleted file mode 100644 index 3f92f09135..0000000000 --- a/test/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go +++ /dev/null @@ -1,318 +0,0 @@ -// +build !ignore_autogenerated - -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Config) DeepCopyInto(out *Config) { - *out = *in - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Healthcheck != nil { - in, out := &in.Healthcheck, &out.Healthcheck - *out = new(HealthConfig) - (*in).DeepCopyInto(*out) - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Shell != nil { - in, out := &in.Shell, &out.Shell - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]History, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.RootFS.DeepCopyInto(&out.RootFS) - in.Config.DeepCopyInto(&out.Config) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. -func (in *ConfigFile) DeepCopy() *ConfigFile { - if in == nil { - return nil - } - out := new(ConfigFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Descriptor) DeepCopyInto(out *Descriptor) { - *out = *in - out.Digest = in.Digest - if in.URLs != nil { - in, out := &in.URLs, &out.URLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(Platform) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. -func (in *Descriptor) DeepCopy() *Descriptor { - if in == nil { - return nil - } - out := new(Descriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Hash) DeepCopyInto(out *Hash) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. -func (in *Hash) DeepCopy() *Hash { - if in == nil { - return nil - } - out := new(Hash) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { - *out = *in - if in.Test != nil { - in, out := &in.Test, &out.Test - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. -func (in *HealthConfig) DeepCopy() *HealthConfig { - if in == nil { - return nil - } - out := new(HealthConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *History) DeepCopyInto(out *History) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. -func (in *History) DeepCopy() *History { - if in == nil { - return nil - } - out := new(History) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { - *out = *in - if in.Manifests != nil { - in, out := &in.Manifests, &out.Manifests - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. -func (in *IndexManifest) DeepCopy() *IndexManifest { - if in == nil { - return nil - } - out := new(IndexManifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Manifest) DeepCopyInto(out *Manifest) { - *out = *in - in.Config.DeepCopyInto(&out.Config) - if in.Layers != nil { - in, out := &in.Layers, &out.Layers - *out = make([]Descriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. -func (in *Manifest) DeepCopy() *Manifest { - if in == nil { - return nil - } - out := new(Manifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Platform) DeepCopyInto(out *Platform) { - *out = *in - if in.OSFeatures != nil { - in, out := &in.OSFeatures, &out.OSFeatures - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Features != nil { - in, out := &in.Features, &out.Features - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. -func (in *Platform) DeepCopy() *Platform { - if in == nil { - return nil - } - out := new(Platform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RootFS) DeepCopyInto(out *RootFS) { - *out = *in - if in.DiffIDs != nil { - in, out := &in.DiffIDs, &out.DiffIDs - *out = make([]Hash, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. -func (in *RootFS) DeepCopy() *RootFS { - if in == nil { - return nil - } - out := new(RootFS) - in.DeepCopyInto(out) - return out -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. -func (in *Time) DeepCopy() *Time { - if in == nil { - return nil - } - out := new(Time) - in.DeepCopyInto(out) - return out -} diff --git a/test/vendor/github.com/kevpar/cri/LICENSE b/test/vendor/github.com/kevpar/cri/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/test/vendor/github.com/kevpar/cri/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/test/vendor/github.com/kevpar/cri/pkg/annotations/annotations.go b/test/vendor/github.com/kevpar/cri/pkg/annotations/annotations.go deleted file mode 100644 index af01c99a38..0000000000 --- a/test/vendor/github.com/kevpar/cri/pkg/annotations/annotations.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Containerd Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package annotations - -// ContainerType values -// Following OCI annotations are used by katacontainers now. -// We'll switch to standard secure pod API after it is defined in CRI. -const ( - // ContainerTypeSandbox represents a pod sandbox container - ContainerTypeSandbox = "sandbox" - - // ContainerTypeContainer represents a container running within a pod - ContainerTypeContainer = "container" - - // ContainerType is the container type (sandbox or container) annotation - ContainerType = "io.kubernetes.cri.container-type" - - // SandboxID is the sandbox ID annotation - SandboxID = "io.kubernetes.cri.sandbox-id" - - // UntrustedWorkload is the sandbox annotation for untrusted workload. Untrusted - // workload can only run on dedicated runtime for untrusted workload. - UntrustedWorkload = "io.kubernetes.cri.untrusted-workload" - - // EnableReset is a sandbox and container annotation to allow reseting a container or pod that - // has already exited back to a ready/created state - EnableReset = "io.microsoft.cri.enablereset" -) diff --git a/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.pb.go b/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.pb.go deleted file mode 100644 index e29ae727f0..0000000000 --- a/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.pb.go +++ /dev/null @@ -1,1344 +0,0 @@ -/* -Copyright 2021 The containerd Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: api.proto - -package api_v1 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type LoadImageRequest struct { - // FilePath is the absolute path of docker image tarball. - FilePath string `protobuf:"bytes,1,opt,name=FilePath,proto3" json:"FilePath,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoadImageRequest) Reset() { *m = LoadImageRequest{} } -func (*LoadImageRequest) ProtoMessage() {} -func (*LoadImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *LoadImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LoadImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LoadImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LoadImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadImageRequest.Merge(m, src) -} -func (m *LoadImageRequest) XXX_Size() int { - return m.Size() -} -func (m *LoadImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LoadImageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LoadImageRequest proto.InternalMessageInfo - -func (m *LoadImageRequest) GetFilePath() string { - if m != nil { - return m.FilePath - } - return "" -} - -type LoadImageResponse struct { - // Images have been loaded. - Images []string `protobuf:"bytes,1,rep,name=Images,proto3" json:"Images,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoadImageResponse) Reset() { *m = LoadImageResponse{} } -func (*LoadImageResponse) ProtoMessage() {} -func (*LoadImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *LoadImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LoadImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LoadImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LoadImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadImageResponse.Merge(m, src) -} -func (m *LoadImageResponse) XXX_Size() int { - return m.Size() -} -func (m *LoadImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LoadImageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LoadImageResponse proto.InternalMessageInfo - -func (m *LoadImageResponse) GetImages() []string { - if m != nil { - return m.Images - } - return nil -} - -type ResetPodSandboxRequest struct { - // ID of the PodSandbox to reset. 
- PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetPodSandboxRequest) Reset() { *m = ResetPodSandboxRequest{} } -func (*ResetPodSandboxRequest) ProtoMessage() {} -func (*ResetPodSandboxRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *ResetPodSandboxRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResetPodSandboxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResetPodSandboxRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResetPodSandboxRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetPodSandboxRequest.Merge(m, src) -} -func (m *ResetPodSandboxRequest) XXX_Size() int { - return m.Size() -} -func (m *ResetPodSandboxRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResetPodSandboxRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResetPodSandboxRequest proto.InternalMessageInfo - -func (m *ResetPodSandboxRequest) GetPodSandboxId() string { - if m != nil { - return m.PodSandboxId - } - return "" -} - -type ResetPodSandboxResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetPodSandboxResponse) Reset() { *m = ResetPodSandboxResponse{} } -func (*ResetPodSandboxResponse) ProtoMessage() {} -func (*ResetPodSandboxResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *ResetPodSandboxResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResetPodSandboxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResetPodSandboxResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResetPodSandboxResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetPodSandboxResponse.Merge(m, src) -} -func (m *ResetPodSandboxResponse) XXX_Size() int { - return m.Size() -} -func (m *ResetPodSandboxResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResetPodSandboxResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ResetPodSandboxResponse proto.InternalMessageInfo - -type ResetContainerRequest struct { - // ID of the container to reset. 
- ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetContainerRequest) Reset() { *m = ResetContainerRequest{} } -func (*ResetContainerRequest) ProtoMessage() {} -func (*ResetContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{4} -} -func (m *ResetContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResetContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResetContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResetContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetContainerRequest.Merge(m, src) -} -func (m *ResetContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *ResetContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResetContainerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResetContainerRequest proto.InternalMessageInfo - -func (m *ResetContainerRequest) GetContainerId() string { - if m != nil { - return m.ContainerId - } - return "" -} - -type ResetContainerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetContainerResponse) Reset() { *m = ResetContainerResponse{} } -func (*ResetContainerResponse) ProtoMessage() {} -func (*ResetContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5} -} -func (m *ResetContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResetContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResetContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResetContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetContainerResponse.Merge(m, src) -} -func (m *ResetContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *ResetContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResetContainerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ResetContainerResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*LoadImageRequest)(nil), "api.v1.LoadImageRequest") - proto.RegisterType((*LoadImageResponse)(nil), "api.v1.LoadImageResponse") - proto.RegisterType((*ResetPodSandboxRequest)(nil), "api.v1.ResetPodSandboxRequest") - proto.RegisterType((*ResetPodSandboxResponse)(nil), "api.v1.ResetPodSandboxResponse") - proto.RegisterType((*ResetContainerRequest)(nil), "api.v1.ResetContainerRequest") - proto.RegisterType((*ResetContainerResponse)(nil), "api.v1.ResetContainerResponse") -} - -func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 345 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0x4f, 0x4f, 0xfa, 0x40, - 0x10, 0x65, 0xf3, 0x4b, 0xc8, 0xaf, 0x23, 0x41, 0xdc, 0x44, 0x2c, 0x8d, 0xae, 0xd8, 0x78, 0x20, - 0x31, 0x96, 0xa8, 0x37, 0x0f, 0x1e, 0x20, 0x31, 0x69, 0xe2, 0x01, 0x8b, 0x77, 0xd2, 0xb2, 0x6b, - 0xd9, 0x04, 0xba, 0x95, 0x6e, 0x89, 0x47, 
0x3f, 0x82, 0x1f, 0x8b, 0xa3, 0x47, 0x8f, 0x52, 0x3f, - 0x88, 0xc6, 0xed, 0x1f, 0x01, 0xe1, 0xb6, 0xf3, 0xde, 0xbc, 0x37, 0x33, 0x2f, 0x0b, 0x9a, 0x1b, - 0x72, 0x2b, 0x9c, 0x0a, 0x29, 0x70, 0xf9, 0xe7, 0x39, 0xbb, 0x30, 0xce, 0x7d, 0x2e, 0x47, 0xb1, - 0x67, 0x0d, 0xc5, 0xa4, 0xed, 0x0b, 0x5f, 0xb4, 0x15, 0xed, 0xc5, 0x8f, 0xaa, 0x52, 0x85, 0x7a, - 0xa5, 0x32, 0xd3, 0x82, 0xda, 0x9d, 0x70, 0xa9, 0x3d, 0x71, 0x7d, 0xe6, 0xb0, 0xa7, 0x98, 0x45, - 0x12, 0x1b, 0xf0, 0xff, 0x96, 0x8f, 0x59, 0xcf, 0x95, 0x23, 0x1d, 0x35, 0x51, 0x4b, 0x73, 0x8a, - 0xda, 0x3c, 0x83, 0xbd, 0xa5, 0xfe, 0x28, 0x14, 0x41, 0xc4, 0x70, 0x1d, 0xca, 0x0a, 0x88, 0x74, - 0xd4, 0xfc, 0xd7, 0xd2, 0x9c, 0xac, 0x32, 0x6f, 0xa0, 0xee, 0xb0, 0x88, 0xc9, 0x9e, 0xa0, 0x7d, - 0x37, 0xa0, 0x9e, 0x78, 0xce, 0x47, 0x9c, 0x42, 0x35, 0x14, 0x74, 0x10, 0xa5, 0xe8, 0x80, 0xd3, - 0x6c, 0x50, 0x25, 0x2c, 0x5a, 0x6d, 0x6a, 0x36, 0xe0, 0xe0, 0x8f, 0x3e, 0x1d, 0x69, 0x5e, 0xc3, - 0xbe, 0xa2, 0xba, 0x22, 0x90, 0x2e, 0x0f, 0xd8, 0x34, 0x77, 0x3e, 0x81, 0xca, 0x30, 0xc7, 0x7e, - 0x7d, 0x77, 0x0a, 0xcc, 0xa6, 0xa6, 0x9e, 0xad, 0xb5, 0xa4, 0x4d, 0x5d, 0x2f, 0xbf, 0x10, 0xd4, - 0xba, 0x8e, 0xdd, 0x1b, 0xc7, 0x3e, 0x0f, 0xfa, 0x6c, 0x3a, 0xe3, 0x43, 0x86, 0x3b, 0xa0, 0x15, - 0x27, 0x63, 0xdd, 0x4a, 0x73, 0xb6, 0xd6, 0x53, 0x33, 0x1a, 0x1b, 0x98, 0x6c, 0xd9, 0x12, 0x7e, - 0x80, 0xdd, 0xb5, 0x4b, 0x30, 0xc9, 0xfb, 0x37, 0x47, 0x64, 0x1c, 0x6f, 0xe5, 0x0b, 0xd7, 0x7b, - 0xa8, 0xae, 0x1e, 0x82, 0x8f, 0x56, 0x44, 0xeb, 0xe1, 0x18, 0x64, 0x1b, 0x9d, 0x5b, 0x76, 0x0e, - 0xe7, 0x0b, 0x82, 0xde, 0x17, 0xa4, 0xf4, 0x92, 0x10, 0x34, 0x4f, 0x08, 0x7a, 0x4b, 0x08, 0xfa, - 0x48, 0x08, 0x7a, 0xfd, 0x24, 0x25, 0xaf, 0xac, 0x3e, 0xcd, 0xd5, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x95, 0xb4, 0xac, 0x51, 0x78, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// CRIPluginServiceClient is the client API for CRIPluginService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type CRIPluginServiceClient interface { - // LoadImage loads a image into containerd. - LoadImage(ctx context.Context, in *LoadImageRequest, opts ...grpc.CallOption) (*LoadImageResponse, error) - // ResetPodSandbox resets the state of a sandbox and any containers that are part of the - // sandbox, recreating associated resources (such as namespaces) previously allocated - // to the sandbox. - // This call is idempotent if the sandbox is in the ready state. - ResetPodSandbox(ctx context.Context, in *ResetPodSandboxRequest, opts ...grpc.CallOption) (*ResetPodSandboxResponse, error) - // ResetContainer resets a stopped container back to the created state. - // This call is idempotent, and must not return an error if the container is already - // in the created state. 
- ResetContainer(ctx context.Context, in *ResetContainerRequest, opts ...grpc.CallOption) (*ResetContainerResponse, error) -} - -type cRIPluginServiceClient struct { - cc *grpc.ClientConn -} - -func NewCRIPluginServiceClient(cc *grpc.ClientConn) CRIPluginServiceClient { - return &cRIPluginServiceClient{cc} -} - -func (c *cRIPluginServiceClient) LoadImage(ctx context.Context, in *LoadImageRequest, opts ...grpc.CallOption) (*LoadImageResponse, error) { - out := new(LoadImageResponse) - err := c.cc.Invoke(ctx, "/api.v1.CRIPluginService/LoadImage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *cRIPluginServiceClient) ResetPodSandbox(ctx context.Context, in *ResetPodSandboxRequest, opts ...grpc.CallOption) (*ResetPodSandboxResponse, error) { - out := new(ResetPodSandboxResponse) - err := c.cc.Invoke(ctx, "/api.v1.CRIPluginService/ResetPodSandbox", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *cRIPluginServiceClient) ResetContainer(ctx context.Context, in *ResetContainerRequest, opts ...grpc.CallOption) (*ResetContainerResponse, error) { - out := new(ResetContainerResponse) - err := c.cc.Invoke(ctx, "/api.v1.CRIPluginService/ResetContainer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// CRIPluginServiceServer is the server API for CRIPluginService service. -type CRIPluginServiceServer interface { - // LoadImage loads a image into containerd. - LoadImage(context.Context, *LoadImageRequest) (*LoadImageResponse, error) - // ResetPodSandbox resets the state of a sandbox and any containers that are part of the - // sandbox, recreating associated resources (such as namespaces) previously allocated - // to the sandbox. - // This call is idempotent if the sandbox is in the ready state. - ResetPodSandbox(context.Context, *ResetPodSandboxRequest) (*ResetPodSandboxResponse, error) - // ResetContainer resets a stopped container back to the created state. - // This call is idempotent, and must not return an error if the container is already - // in the created state. - ResetContainer(context.Context, *ResetContainerRequest) (*ResetContainerResponse, error) -} - -// UnimplementedCRIPluginServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedCRIPluginServiceServer struct { -} - -func (*UnimplementedCRIPluginServiceServer) LoadImage(ctx context.Context, req *LoadImageRequest) (*LoadImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LoadImage not implemented") -} -func (*UnimplementedCRIPluginServiceServer) ResetPodSandbox(ctx context.Context, req *ResetPodSandboxRequest) (*ResetPodSandboxResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResetPodSandbox not implemented") -} -func (*UnimplementedCRIPluginServiceServer) ResetContainer(ctx context.Context, req *ResetContainerRequest) (*ResetContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResetContainer not implemented") -} - -func RegisterCRIPluginServiceServer(s *grpc.Server, srv CRIPluginServiceServer) { - s.RegisterService(&_CRIPluginService_serviceDesc, srv) -} - -func _CRIPluginService_LoadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoadImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CRIPluginServiceServer).LoadImage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.v1.CRIPluginService/LoadImage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CRIPluginServiceServer).LoadImage(ctx, req.(*LoadImageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _CRIPluginService_ResetPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResetPodSandboxRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CRIPluginServiceServer).ResetPodSandbox(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.v1.CRIPluginService/ResetPodSandbox", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CRIPluginServiceServer).ResetPodSandbox(ctx, req.(*ResetPodSandboxRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _CRIPluginService_ResetContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResetContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CRIPluginServiceServer).ResetContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.v1.CRIPluginService/ResetContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CRIPluginServiceServer).ResetContainer(ctx, req.(*ResetContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _CRIPluginService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "api.v1.CRIPluginService", - HandlerType: (*CRIPluginServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoadImage", - Handler: _CRIPluginService_LoadImage_Handler, - }, - { - MethodName: "ResetPodSandbox", - Handler: _CRIPluginService_ResetPodSandbox_Handler, - }, - { - MethodName: "ResetContainer", - Handler: _CRIPluginService_ResetContainer_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *LoadImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LoadImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LoadImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.FilePath) > 0 { - i -= len(m.FilePath) - copy(dAtA[i:], m.FilePath) - i = encodeVarintApi(dAtA, i, uint64(len(m.FilePath))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LoadImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LoadImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LoadImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Images) > 0 { - for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Images[iNdEx]) - copy(dAtA[i:], m.Images[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.Images[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResetPodSandboxRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResetPodSandboxRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResetPodSandboxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PodSandboxId) > 0 { - i -= len(m.PodSandboxId) - copy(dAtA[i:], m.PodSandboxId) - i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResetPodSandboxResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResetPodSandboxResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResetPodSandboxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ResetContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResetContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResetContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ContainerId) > 0 { - i -= len(m.ContainerId) - copy(dAtA[i:], m.ContainerId) - i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResetContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*ResetContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResetContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LoadImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FilePath) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *LoadImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Images) > 0 { - for _, s := range m.Images { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *ResetPodSandboxRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PodSandboxId) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *ResetPodSandboxResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ResetContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerId) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *ResetContainerResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *LoadImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LoadImageRequest{`, - `FilePath:` + fmt.Sprintf("%v", this.FilePath) + `,`, - `}`, - }, "") - return s -} -func (this *LoadImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LoadImageResponse{`, - `Images:` + fmt.Sprintf("%v", this.Images) + `,`, - `}`, - }, "") - return s -} -func (this *ResetPodSandboxRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResetPodSandboxRequest{`, - `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, - `}`, - }, "") - return s -} -func (this *ResetPodSandboxResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResetPodSandboxResponse{`, - `}`, - }, "") - return s -} -func (this *ResetContainerRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResetContainerRequest{`, - `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, - `}`, - }, "") - return s -} -func (this *ResetContainerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResetContainerResponse{`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *LoadImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if 
b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LoadImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LoadImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FilePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LoadImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LoadImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LoadImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetPodSandboxRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ResetPodSandboxRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResetPodSandboxRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodSandboxId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetPodSandboxResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResetPodSandboxResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResetPodSandboxResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResetContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResetContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.ContainerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResetContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResetContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") -) diff --git a/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.proto b/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.proto deleted file mode 100644 index 62839e64ca..0000000000 --- a/test/vendor/github.com/kevpar/cri/pkg/api/v1/api.proto +++ /dev/null @@ -1,54 +0,0 @@ -// To regenerate api.pb.go run `make proto` -syntax = 'proto3'; - -package api.v1; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option 
(gogoproto.goproto_getters_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_unrecognized_all) = false; - -// CRIPluginService defines non-CRI APIs for cri plugin. -service CRIPluginService{ - // LoadImage loads a image into containerd. - rpc LoadImage(LoadImageRequest) returns (LoadImageResponse) {} - // ResetPodSandbox resets the state of a sandbox and any containers that are part of the - // sandbox, recreating previously allocated resources (such as namespaces) that were freed - // on stop, and retaining other resources. - // This call is idempotent if the sandbox is in the ready state. - rpc ResetPodSandbox(ResetPodSandboxRequest) returns (ResetPodSandboxResponse) {} - // ResetContainer resets a stopped container back to the created state, keeping - // its scratch space untouched. - // This call is idempotent, and must not return an error if the container is already - // in the created state. - rpc ResetContainer(ResetContainerRequest) returns (ResetContainerResponse) {} -} - -message LoadImageRequest { - // FilePath is the absolute path of docker image tarball. - string FilePath = 1; -} - -message LoadImageResponse { - // Images have been loaded. - repeated string Images = 1; -} - -message ResetPodSandboxRequest { - // ID of the PodSandbox to reset. - string pod_sandbox_id = 1; -} - -message ResetPodSandboxResponse {} - -message ResetContainerRequest { - // ID of the container to reset. - string container_id = 1; -} - -message ResetContainerResponse {} diff --git a/test/vendor/modules.txt b/test/vendor/modules.txt deleted file mode 100644 index 2a56bbbc54..0000000000 --- a/test/vendor/modules.txt +++ /dev/null @@ -1,415 +0,0 @@ -# github.com/Microsoft/go-winio v0.5.2 -## explicit; go 1.13 -github.com/Microsoft/go-winio -github.com/Microsoft/go-winio/backuptar -github.com/Microsoft/go-winio/pkg/guid -github.com/Microsoft/go-winio/pkg/process -github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.8.23 => ../ -## explicit; go 1.17 -github.com/Microsoft/hcsshim -github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options -github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats -github.com/Microsoft/hcsshim/computestorage -github.com/Microsoft/hcsshim/ext4/dmverity -github.com/Microsoft/hcsshim/ext4/internal/compactext4 -github.com/Microsoft/hcsshim/ext4/internal/format -github.com/Microsoft/hcsshim/ext4/tar2ext4 -github.com/Microsoft/hcsshim/hcn -github.com/Microsoft/hcsshim/internal/clone -github.com/Microsoft/hcsshim/internal/cmd -github.com/Microsoft/hcsshim/internal/cni -github.com/Microsoft/hcsshim/internal/computeagent -github.com/Microsoft/hcsshim/internal/copyfile -github.com/Microsoft/hcsshim/internal/cow -github.com/Microsoft/hcsshim/internal/cpugroup -github.com/Microsoft/hcsshim/internal/credentials -github.com/Microsoft/hcsshim/internal/devices -github.com/Microsoft/hcsshim/internal/extendedtask -github.com/Microsoft/hcsshim/internal/gcs -github.com/Microsoft/hcsshim/internal/guest/spec -github.com/Microsoft/hcsshim/internal/guestpath -github.com/Microsoft/hcsshim/internal/hcs -github.com/Microsoft/hcsshim/internal/hcs/resourcepaths -github.com/Microsoft/hcsshim/internal/hcs/schema1 -github.com/Microsoft/hcsshim/internal/hcs/schema2 -github.com/Microsoft/hcsshim/internal/hcserror -github.com/Microsoft/hcsshim/internal/hcsoci -github.com/Microsoft/hcsshim/internal/hns -github.com/Microsoft/hcsshim/internal/hooks 
-github.com/Microsoft/hcsshim/internal/interop -github.com/Microsoft/hcsshim/internal/jobobject -github.com/Microsoft/hcsshim/internal/layers -github.com/Microsoft/hcsshim/internal/lcow -github.com/Microsoft/hcsshim/internal/log -github.com/Microsoft/hcsshim/internal/logfields -github.com/Microsoft/hcsshim/internal/longpath -github.com/Microsoft/hcsshim/internal/memory -github.com/Microsoft/hcsshim/internal/mergemaps -github.com/Microsoft/hcsshim/internal/ncproxy/networking -github.com/Microsoft/hcsshim/internal/ncproxyttrpc -github.com/Microsoft/hcsshim/internal/oc -github.com/Microsoft/hcsshim/internal/oci -github.com/Microsoft/hcsshim/internal/ospath -github.com/Microsoft/hcsshim/internal/processorinfo -github.com/Microsoft/hcsshim/internal/protocol/guestrequest -github.com/Microsoft/hcsshim/internal/protocol/guestresource -github.com/Microsoft/hcsshim/internal/queue -github.com/Microsoft/hcsshim/internal/regstate -github.com/Microsoft/hcsshim/internal/resources -github.com/Microsoft/hcsshim/internal/runhcs -github.com/Microsoft/hcsshim/internal/safefile -github.com/Microsoft/hcsshim/internal/schemaversion -github.com/Microsoft/hcsshim/internal/security -github.com/Microsoft/hcsshim/internal/shimdiag -github.com/Microsoft/hcsshim/internal/timeout -github.com/Microsoft/hcsshim/internal/tools/securitypolicy/helpers -github.com/Microsoft/hcsshim/internal/uvm -github.com/Microsoft/hcsshim/internal/uvmfolder -github.com/Microsoft/hcsshim/internal/vmcompute -github.com/Microsoft/hcsshim/internal/wclayer -github.com/Microsoft/hcsshim/internal/wcow -github.com/Microsoft/hcsshim/internal/winapi -github.com/Microsoft/hcsshim/osversion -github.com/Microsoft/hcsshim/pkg/annotations -github.com/Microsoft/hcsshim/pkg/go-runhcs -github.com/Microsoft/hcsshim/pkg/ociwclayer -github.com/Microsoft/hcsshim/pkg/octtrpc -github.com/Microsoft/hcsshim/pkg/securitypolicy -# github.com/bits-and-blooms/bitset v1.2.0 -## explicit; go 1.14 -github.com/bits-and-blooms/bitset -# github.com/blang/semver v3.5.1+incompatible -## explicit -github.com/blang/semver -# github.com/cenkalti/backoff/v4 v4.1.1 -## explicit; go 1.13 -github.com/cenkalti/backoff/v4 -# github.com/containerd/cgroups v1.0.1 -## explicit; go 1.13 -github.com/containerd/cgroups/stats/v1 -# github.com/containerd/console v1.0.2 -## explicit; go 1.13 -github.com/containerd/console -# github.com/containerd/containerd v1.5.10 -## explicit; go 1.16 -github.com/containerd/containerd -github.com/containerd/containerd/api/events -github.com/containerd/containerd/api/services/containers/v1 -github.com/containerd/containerd/api/services/content/v1 -github.com/containerd/containerd/api/services/diff/v1 -github.com/containerd/containerd/api/services/events/v1 -github.com/containerd/containerd/api/services/images/v1 -github.com/containerd/containerd/api/services/introspection/v1 -github.com/containerd/containerd/api/services/leases/v1 -github.com/containerd/containerd/api/services/namespaces/v1 -github.com/containerd/containerd/api/services/snapshots/v1 -github.com/containerd/containerd/api/services/tasks/v1 -github.com/containerd/containerd/api/services/version/v1 -github.com/containerd/containerd/api/types -github.com/containerd/containerd/api/types/task -github.com/containerd/containerd/archive -github.com/containerd/containerd/archive/compression -github.com/containerd/containerd/cio -github.com/containerd/containerd/containers -github.com/containerd/containerd/content -github.com/containerd/containerd/content/proxy -github.com/containerd/containerd/defaults 
-github.com/containerd/containerd/diff -github.com/containerd/containerd/errdefs -github.com/containerd/containerd/events -github.com/containerd/containerd/events/exchange -github.com/containerd/containerd/filters -github.com/containerd/containerd/identifiers -github.com/containerd/containerd/images -github.com/containerd/containerd/images/archive -github.com/containerd/containerd/integration/remote/util -github.com/containerd/containerd/labels -github.com/containerd/containerd/leases -github.com/containerd/containerd/leases/proxy -github.com/containerd/containerd/log -github.com/containerd/containerd/mount -github.com/containerd/containerd/namespaces -github.com/containerd/containerd/oci -github.com/containerd/containerd/pkg/cap -github.com/containerd/containerd/pkg/dialer -github.com/containerd/containerd/pkg/userns -github.com/containerd/containerd/platforms -github.com/containerd/containerd/plugin -github.com/containerd/containerd/reference -github.com/containerd/containerd/reference/docker -github.com/containerd/containerd/remotes -github.com/containerd/containerd/remotes/docker -github.com/containerd/containerd/remotes/docker/auth -github.com/containerd/containerd/remotes/docker/schema1 -github.com/containerd/containerd/remotes/errors -github.com/containerd/containerd/rootfs -github.com/containerd/containerd/runtime -github.com/containerd/containerd/runtime/linux/runctypes -github.com/containerd/containerd/runtime/v2/runc/options -github.com/containerd/containerd/runtime/v2/task -github.com/containerd/containerd/services -github.com/containerd/containerd/services/introspection -github.com/containerd/containerd/snapshots -github.com/containerd/containerd/snapshots/proxy -github.com/containerd/containerd/sys -github.com/containerd/containerd/version -# github.com/containerd/continuity v0.1.0 -## explicit; go 1.13 -github.com/containerd/continuity/fs -github.com/containerd/continuity/sysx -# github.com/containerd/fifo v1.0.0 -## explicit; go 1.13 -github.com/containerd/fifo -# github.com/containerd/go-runc v1.0.0 -## explicit; go 1.13 -github.com/containerd/go-runc -# github.com/containerd/ttrpc v1.1.0 -## explicit; go 1.13 -github.com/containerd/ttrpc -# github.com/containerd/typeurl v1.0.2 -## explicit; go 1.13 -github.com/containerd/typeurl -# github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 -## explicit -github.com/docker/cli/cli/config -github.com/docker/cli/cli/config/configfile -github.com/docker/cli/cli/config/credentials -github.com/docker/cli/cli/config/types -# github.com/docker/distribution v2.7.1+incompatible -## explicit -github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 -## explicit -github.com/docker/docker/pkg/homedir -# github.com/docker/docker-credential-helpers v0.6.3 -## explicit -github.com/docker/docker-credential-helpers/client -github.com/docker/docker-credential-helpers/credentials -# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c -## explicit -github.com/docker/go-events -# github.com/gogo/googleapis v1.4.0 -## explicit; go 1.12 -github.com/gogo/googleapis/google/rpc -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 -github.com/gogo/protobuf/gogoproto -github.com/gogo/protobuf/proto -github.com/gogo/protobuf/protoc-gen-gogo/descriptor -github.com/gogo/protobuf/sortkeys -github.com/gogo/protobuf/types -# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e -## explicit -github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.0 -## 
explicit; go 1.9 -github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-containerregistry v0.5.1 -## explicit; go 1.14 -github.com/google/go-containerregistry/internal/and -github.com/google/go-containerregistry/internal/gzip -github.com/google/go-containerregistry/internal/redact -github.com/google/go-containerregistry/internal/retry -github.com/google/go-containerregistry/internal/retry/wait -github.com/google/go-containerregistry/internal/verify -github.com/google/go-containerregistry/pkg/authn -github.com/google/go-containerregistry/pkg/logs -github.com/google/go-containerregistry/pkg/name -github.com/google/go-containerregistry/pkg/v1 -github.com/google/go-containerregistry/pkg/v1/match -github.com/google/go-containerregistry/pkg/v1/partial -github.com/google/go-containerregistry/pkg/v1/remote -github.com/google/go-containerregistry/pkg/v1/remote/transport -github.com/google/go-containerregistry/pkg/v1/stream -github.com/google/go-containerregistry/pkg/v1/types -# github.com/google/uuid v1.3.0 -## explicit -github.com/google/uuid -# github.com/hashicorp/errwrap v1.0.0 -## explicit -github.com/hashicorp/errwrap -# github.com/hashicorp/go-multierror v1.0.0 -## explicit -github.com/hashicorp/go-multierror -# github.com/kevpar/cri v1.11.1-0.20220302210600-4c5c347230b2 -## explicit -github.com/kevpar/cri/pkg/annotations -github.com/kevpar/cri/pkg/api/v1 -# github.com/klauspost/compress v1.11.13 -## explicit; go 1.13 -github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 -github.com/klauspost/compress/snappy -github.com/klauspost/compress/zstd -github.com/klauspost/compress/zstd/internal/xxhash -# github.com/moby/locker v1.0.1 -## explicit; go 1.13 -github.com/moby/locker -# github.com/moby/sys/mountinfo v0.4.1 -## explicit; go 1.14 -github.com/moby/sys/mountinfo -# github.com/opencontainers/go-digest v1.0.0 -## explicit; go 1.13 -github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.2 -## explicit -github.com/opencontainers/image-spec/identity -github.com/opencontainers/image-spec/specs-go -github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.0.3 -## explicit; go 1.13 -github.com/opencontainers/runc/libcontainer/user -# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 -## explicit -github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 -## explicit -github.com/opencontainers/runtime-tools/error -github.com/opencontainers/runtime-tools/filepath -github.com/opencontainers/runtime-tools/generate -github.com/opencontainers/runtime-tools/generate/seccomp -github.com/opencontainers/runtime-tools/specerror -github.com/opencontainers/runtime-tools/validate -# github.com/opencontainers/selinux v1.8.2 -## explicit; go 1.13 -github.com/opencontainers/selinux/go-selinux -github.com/opencontainers/selinux/go-selinux/label -github.com/opencontainers/selinux/pkg/pwalk -# github.com/pkg/errors v0.9.1 -## explicit -github.com/pkg/errors -# github.com/sirupsen/logrus v1.8.1 -## explicit; go 1.13 -github.com/sirupsen/logrus -# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 -## explicit -github.com/syndtr/gocapability/capability -# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f -## explicit -github.com/xeipuuv/gojsonpointer -# 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 -## explicit -github.com/xeipuuv/gojsonreference -# github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f -## explicit -github.com/xeipuuv/gojsonschema -# go.opencensus.io v0.22.3 -## explicit; go 1.13 -go.opencensus.io -go.opencensus.io/internal -go.opencensus.io/trace -go.opencensus.io/trace/internal -go.opencensus.io/trace/propagation -go.opencensus.io/trace/tracestate -# golang.org/x/net v0.0.0-20210825183410-e898025ed96a -## explicit; go 1.17 -golang.org/x/net/context/ctxhttp -golang.org/x/net/http/httpguts -golang.org/x/net/http2 -golang.org/x/net/http2/hpack -golang.org/x/net/idna -golang.org/x/net/internal/timeseries -golang.org/x/net/trace -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c -## explicit -golang.org/x/sync/errgroup -golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e -## explicit; go 1.17 -golang.org/x/sys/internal/unsafeheader -golang.org/x/sys/unix -golang.org/x/sys/windows -golang.org/x/sys/windows/registry -golang.org/x/sys/windows/svc -golang.org/x/sys/windows/svc/mgr -# golang.org/x/text v0.3.6 -## explicit; go 1.11 -golang.org/x/text/secure/bidirule -golang.org/x/text/transform -golang.org/x/text/unicode/bidi -golang.org/x/text/unicode/norm -# google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 -## explicit; go 1.11 -google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.40.0 => google.golang.org/grpc v1.27.1 -## explicit; go 1.11 -google.golang.org/grpc -google.golang.org/grpc/attributes -google.golang.org/grpc/backoff -google.golang.org/grpc/balancer -google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/codes -google.golang.org/grpc/connectivity -google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/encoding -google.golang.org/grpc/encoding/proto -google.golang.org/grpc/grpclog -google.golang.org/grpc/health/grpc_health_v1 -google.golang.org/grpc/internal -google.golang.org/grpc/internal/backoff -google.golang.org/grpc/internal/balancerload -google.golang.org/grpc/internal/binarylog -google.golang.org/grpc/internal/buffer -google.golang.org/grpc/internal/channelz -google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/grpcrand -google.golang.org/grpc/internal/grpcsync -google.golang.org/grpc/internal/resolver/dns -google.golang.org/grpc/internal/resolver/passthrough -google.golang.org/grpc/internal/syscall -google.golang.org/grpc/internal/transport -google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata -google.golang.org/grpc/naming -google.golang.org/grpc/peer -google.golang.org/grpc/resolver -google.golang.org/grpc/serviceconfig -google.golang.org/grpc/stats -google.golang.org/grpc/status -google.golang.org/grpc/tap -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 -google.golang.org/protobuf/encoding/prototext -google.golang.org/protobuf/encoding/protowire -google.golang.org/protobuf/internal/descfmt -google.golang.org/protobuf/internal/descopts -google.golang.org/protobuf/internal/detrand -google.golang.org/protobuf/internal/encoding/defval -google.golang.org/protobuf/internal/encoding/messageset -google.golang.org/protobuf/internal/encoding/tag -google.golang.org/protobuf/internal/encoding/text -google.golang.org/protobuf/internal/errors 
-google.golang.org/protobuf/internal/filedesc -google.golang.org/protobuf/internal/filetype -google.golang.org/protobuf/internal/flags -google.golang.org/protobuf/internal/genid -google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/order -google.golang.org/protobuf/internal/pragma -google.golang.org/protobuf/internal/set -google.golang.org/protobuf/internal/strs -google.golang.org/protobuf/internal/version -google.golang.org/protobuf/proto -google.golang.org/protobuf/reflect/protodesc -google.golang.org/protobuf/reflect/protoreflect -google.golang.org/protobuf/reflect/protoregistry -google.golang.org/protobuf/runtime/protoiface -google.golang.org/protobuf/runtime/protoimpl -google.golang.org/protobuf/types/descriptorpb -google.golang.org/protobuf/types/known/anypb -google.golang.org/protobuf/types/known/durationpb -google.golang.org/protobuf/types/known/timestamppb -# k8s.io/cri-api v0.20.6 -## explicit; go 1.15 -k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# github.com/Microsoft/hcsshim => ../ -# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 -# google.golang.org/grpc => google.golang.org/grpc v1.27.1
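
The vendored api.pb.go and api.proto deleted above define the non-CRI CRIPluginService (LoadImage, ResetPodSandbox, ResetContainer) that the integration tests consume via the generated gRPC client. As orientation only, here is a minimal, hedged sketch of driving that client; the dial target, tarball path, and IDs are placeholders I am assuming for illustration, not values taken from this repository, and a real caller would use whatever endpoint the containerd CRI plugin actually exposes.

package main

import (
	"context"
	"log"

	// Aliased import of the generated package removed from test/vendor above.
	api "github.com/kevpar/cri/pkg/api/v1"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder endpoint; substitute the CRI plugin's real gRPC address.
	conn, err := grpc.Dial("127.0.0.1:10010", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := api.NewCRIPluginServiceClient(conn)
	ctx := context.Background()

	// LoadImage imports an image tarball into containerd.
	resp, err := client.LoadImage(ctx, &api.LoadImageRequest{FilePath: `C:\images\test.tar`})
	if err != nil {
		log.Fatalf("LoadImage: %v", err)
	}
	log.Printf("loaded images: %v", resp.Images)

	// Both Reset calls are documented as idempotent in the service definition above.
	if _, err := client.ResetPodSandbox(ctx, &api.ResetPodSandboxRequest{PodSandboxId: "example-sandbox-id"}); err != nil {
		log.Fatalf("ResetPodSandbox: %v", err)
	}
	if _, err := client.ResetContainer(ctx, &api.ResetContainerRequest{ContainerId: "example-container-id"}); err != nil {
		log.Fatalf("ResetContainer: %v", err)
	}
}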