From 00e1230945560020bf2c94e461428ad3c42a0075 Mon Sep 17 00:00:00 2001
From: Cory Snider
Date: Thu, 22 Feb 2024 20:49:15 -0500
Subject: [PATCH] swarmd: convert to Go module

And make the repository root a Go workspace.

Signed-off-by: Cory Snider
---
 go.mod | 19 +- go.sum | 35 - go.work | 6 + go.work.sum | 1 + swarmd/go.mod | 99 + swarmd/go.sum | 691 + vendor/github.com/docker/docker/api/README.md | 42 - vendor/github.com/docker/docker/api/common.go | 11 - .../docker/docker/api/common_unix.go | 6 - .../docker/docker/api/common_windows.go | 8 - .../docker/docker/api/swagger-gen.yaml | 12 - .../github.com/docker/docker/api/swagger.yaml | 12183 ---------------- .../docker/docker/api/types/blkiodev/blkio.go | 23 - .../docker/api/types/checkpoint/list.go | 7 - .../docker/api/types/checkpoint/options.go | 19 - .../docker/docker/api/types/client.go | 426 - .../docker/docker/api/types/configs.go | 67 - .../docker/api/types/container/change_type.go | 15 - .../api/types/container/change_types.go | 23 - .../docker/api/types/container/config.go | 79 - .../api/types/container/container_top.go | 22 - .../api/types/container/container_update.go | 16 - .../api/types/container/create_response.go | 19 - .../docker/api/types/container/errors.go | 9 - .../api/types/container/filesystem_change.go | 19 - .../docker/api/types/container/hostconfig.go | 493 - .../api/types/container/hostconfig_unix.go | 41 - .../api/types/container/hostconfig_windows.go | 40 - .../api/types/container/wait_exit_error.go | 12 - .../api/types/container/wait_response.go | 18 - .../api/types/container/waitcondition.go | 22 - .../docker/docker/api/types/error_response.go | 13 - .../docker/api/types/error_response_ext.go | 6 - .../docker/docker/api/types/events/events.go | 127 - .../docker/docker/api/types/filters/errors.go | 37 - .../docker/docker/api/types/filters/parse.go | 346 - .../docker/api/types/graph_driver_data.go | 23 - .../docker/docker/api/types/id_response.go | 13 - .../docker/api/types/image/image_history.go | 36 - .../docker/docker/api/types/image/opts.go | 9 - .../api/types/image_delete_response_item.go | 15 - .../docker/docker/api/types/image_summary.go | 89 - .../docker/docker/api/types/mount/mount.go | 145 - .../docker/docker/api/types/network/ipam.go | 132 - .../docker/api/types/network/network.go | 111 - .../docker/docker/api/types/plugin.go | 203 - .../docker/docker/api/types/plugin_device.go | 25 - .../docker/docker/api/types/plugin_env.go | 25 - .../docker/api/types/plugin_interface_type.go | 21 - .../docker/docker/api/types/plugin_mount.go | 37 - .../docker/api/types/plugin_responses.go | 71 - .../docker/docker/api/types/port.go | 23 - .../docker/api/types/registry/authconfig.go | 99 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 122 - .../api/types/service_update_response.go | 12 - .../docker/docker/api/types/stats.go | 181 - .../docker/api/types/strslice/strslice.go | 30 - .../docker/docker/api/types/swarm/common.go | 48 - .../docker/docker/api/types/swarm/config.go | 40 - .../docker/api/types/swarm/container.go | 80 - .../docker/docker/api/types/swarm/network.go | 121 - .../docker/docker/api/types/swarm/node.go | 139 - .../docker/docker/api/types/swarm/runtime.go | 27 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 808 - .../api/types/swarm/runtime/plugin.proto | 19 - .../docker/docker/api/types/swarm/secret.go | 36 - .../docker/docker/api/types/swarm/service.go | 202 - .../docker/docker/api/types/swarm/swarm.go | 
237 - .../docker/docker/api/types/swarm/task.go | 225 - .../docker/docker/api/types/system/info.go | 116 - .../docker/docker/api/types/system/runtime.go | 14 - .../docker/api/types/system/security_opts.go | 48 - .../docker/docker/api/types/time/timestamp.go | 131 - .../docker/docker/api/types/types.go | 633 - .../docker/api/types/types_deprecated.go | 72 - .../docker/api/types/versions/README.md | 14 - .../docker/api/types/versions/compare.go | 65 - .../docker/api/types/volume/cluster_volume.go | 420 - .../docker/api/types/volume/create_options.go | 29 - .../docker/api/types/volume/list_response.go | 18 - .../docker/docker/api/types/volume/options.go | 8 - .../docker/docker/api/types/volume/volume.go | 75 - .../docker/api/types/volume/volume_update.go | 7 - .../github.com/docker/docker/client/README.md | 36 - .../docker/docker/client/build_cancel.go | 16 - .../docker/docker/client/build_prune.go | 45 - .../docker/docker/client/checkpoint_create.go | 14 - .../docker/docker/client/checkpoint_delete.go | 20 - .../docker/docker/client/checkpoint_list.go | 28 - .../github.com/docker/docker/client/client.go | 415 - .../docker/docker/client/client_deprecated.go | 27 - .../docker/docker/client/client_unix.go | 7 - .../docker/docker/client/client_windows.go | 5 - .../docker/docker/client/config_create.go | 25 - .../docker/docker/client/config_inspect.go | 36 - .../docker/docker/client/config_list.go | 38 - .../docker/docker/client/config_remove.go | 13 - .../docker/docker/client/config_update.go | 20 - .../docker/docker/client/container_attach.go | 59 - .../docker/docker/client/container_commit.go | 55 - .../docker/docker/client/container_copy.go | 93 - .../docker/docker/client/container_create.go | 86 - .../docker/docker/client/container_diff.go | 23 - .../docker/docker/client/container_exec.go | 66 - .../docker/docker/client/container_export.go | 19 - .../docker/docker/client/container_inspect.go | 53 - .../docker/docker/client/container_kill.go | 18 - .../docker/docker/client/container_list.go | 56 - .../docker/docker/client/container_logs.go | 80 - .../docker/docker/client/container_pause.go | 10 - .../docker/docker/client/container_prune.go | 36 - .../docker/docker/client/container_remove.go | 27 - .../docker/docker/client/container_rename.go | 15 - .../docker/docker/client/container_resize.go | 29 - .../docker/docker/client/container_restart.go | 26 - .../docker/docker/client/container_start.go | 23 - .../docker/docker/client/container_stats.go | 46 - .../docker/docker/client/container_stop.go | 30 - .../docker/docker/client/container_top.go | 28 - .../docker/docker/client/container_unpause.go | 10 - .../docker/docker/client/container_update.go | 21 - .../docker/docker/client/container_wait.go | 104 - .../docker/docker/client/disk_usage.go | 33 - .../docker/client/distribution_inspect.go | 39 - .../docker/docker/client/envvars.go | 90 - .../github.com/docker/docker/client/errors.go | 58 - .../github.com/docker/docker/client/events.go | 101 - .../github.com/docker/docker/client/hijack.go | 164 - .../docker/docker/client/image_build.go | 144 - .../docker/docker/client/image_create.go | 40 - .../docker/docker/client/image_history.go | 22 - .../docker/docker/client/image_import.go | 40 - .../docker/docker/client/image_inspect.go | 32 - .../docker/docker/client/image_list.go | 49 - .../docker/docker/client/image_load.go | 31 - .../docker/docker/client/image_prune.go | 36 - .../docker/docker/client/image_pull.go | 64 - .../docker/docker/client/image_push.go | 57 - 
.../docker/docker/client/image_remove.go | 31 - .../docker/docker/client/image_save.go | 21 - .../docker/docker/client/image_search.go | 55 - .../docker/docker/client/image_tag.go | 37 - .../github.com/docker/docker/client/info.go | 26 - .../docker/docker/client/interface.go | 202 - .../docker/client/interface_experimental.go | 18 - .../docker/docker/client/interface_stable.go | 10 - .../github.com/docker/docker/client/login.go | 24 - .../docker/docker/client/network_connect.go | 19 - .../docker/docker/client/network_create.go | 25 - .../docker/client/network_disconnect.go | 15 - .../docker/docker/client/network_inspect.go | 49 - .../docker/docker/client/network_list.go | 32 - .../docker/docker/client/network_prune.go | 36 - .../docker/docker/client/network_remove.go | 10 - .../docker/docker/client/node_inspect.go | 32 - .../docker/docker/client/node_list.go | 35 - .../docker/docker/client/node_remove.go | 20 - .../docker/docker/client/node_update.go | 17 - .../docker/docker/client/options.go | 233 - .../github.com/docker/docker/client/ping.go | 73 - .../docker/docker/client/plugin_create.go | 23 - .../docker/docker/client/plugin_disable.go | 19 - .../docker/docker/client/plugin_enable.go | 19 - .../docker/docker/client/plugin_inspect.go | 31 - .../docker/docker/client/plugin_install.go | 117 - .../docker/docker/client/plugin_list.go | 33 - .../docker/docker/client/plugin_push.go | 20 - .../docker/docker/client/plugin_remove.go | 20 - .../docker/docker/client/plugin_set.go | 12 - .../docker/docker/client/plugin_upgrade.go | 42 - .../docker/docker/client/request.go | 282 - .../docker/docker/client/secret_create.go | 25 - .../docker/docker/client/secret_inspect.go | 36 - .../docker/docker/client/secret_list.go | 38 - .../docker/docker/client/secret_remove.go | 13 - .../docker/docker/client/secret_update.go | 20 - .../docker/docker/client/service_create.go | 184 - .../docker/docker/client/service_inspect.go | 37 - .../docker/docker/client/service_list.go | 39 - .../docker/docker/client/service_logs.go | 52 - .../docker/docker/client/service_remove.go | 10 - .../docker/docker/client/service_update.go | 79 - .../docker/client/swarm_get_unlock_key.go | 21 - .../docker/docker/client/swarm_init.go | 21 - .../docker/docker/client/swarm_inspect.go | 21 - .../docker/docker/client/swarm_join.go | 14 - .../docker/docker/client/swarm_leave.go | 17 - .../docker/docker/client/swarm_unlock.go | 14 - .../docker/docker/client/swarm_update.go | 21 - .../docker/docker/client/task_inspect.go | 32 - .../docker/docker/client/task_list.go | 35 - .../docker/docker/client/task_logs.go | 51 - .../github.com/docker/docker/client/utils.go | 34 - .../docker/docker/client/version.go | 21 - .../docker/docker/client/volume_create.go | 20 - .../docker/docker/client/volume_inspect.go | 38 - .../docker/docker/client/volume_list.go | 33 - .../docker/docker/client/volume_prune.go | 36 - .../docker/docker/client/volume_remove.go | 21 - .../docker/docker/client/volume_update.go | 24 - .../docker/image/spec/specs-go/v1/image.go | 54 - .../docker/internal/multierror/multierror.go | 46 - .../docker/go-connections/nat/nat.go | 240 - .../docker/go-connections/nat/parse.go | 33 - .../docker/go-connections/nat/sort.go | 96 - .../docker/go-units/CONTRIBUTING.md | 67 - vendor/github.com/docker/go-units/LICENSE | 191 - vendor/github.com/docker/go-units/MAINTAINERS | 46 - vendor/github.com/docker/go-units/README.md | 16 - vendor/github.com/docker/go-units/circle.yml | 11 - vendor/github.com/docker/go-units/duration.go | 35 - 
vendor/github.com/docker/go-units/size.go | 154 - vendor/github.com/docker/go-units/ulimit.go | 123 - .../github.com/felixge/httpsnoop/.gitignore | 0 .../github.com/felixge/httpsnoop/.travis.yml | 6 - .../github.com/felixge/httpsnoop/LICENSE.txt | 19 - vendor/github.com/felixge/httpsnoop/Makefile | 10 - vendor/github.com/felixge/httpsnoop/README.md | 95 - .../felixge/httpsnoop/capture_metrics.go | 86 - vendor/github.com/felixge/httpsnoop/docs.go | 10 - .../httpsnoop/wrap_generated_gteq_1.8.go | 436 - .../httpsnoop/wrap_generated_lt_1.8.go | 278 - vendor/github.com/go-logr/logr/funcr/funcr.go | 804 - vendor/github.com/go-logr/stdr/LICENSE | 201 - vendor/github.com/go-logr/stdr/README.md | 6 - vendor/github.com/go-logr/stdr/stdr.go | 170 - .../opencontainers/image-spec/LICENSE | 191 - .../image-spec/specs-go/v1/annotations.go | 62 - .../image-spec/specs-go/v1/config.go | 111 - .../image-spec/specs-go/v1/descriptor.go | 80 - .../image-spec/specs-go/v1/index.go | 38 - .../image-spec/specs-go/v1/layout.go | 32 - .../image-spec/specs-go/v1/manifest.go | 41 - .../image-spec/specs-go/v1/mediatype.go | 75 - .../image-spec/specs-go/version.go | 32 - .../image-spec/specs-go/versioned.go | 23 - .../instrumentation/net/http/otelhttp/LICENSE | 201 - .../net/http/otelhttp/client.go | 61 - .../net/http/otelhttp/common.go | 46 - .../net/http/otelhttp/config.go | 187 - .../instrumentation/net/http/otelhttp/doc.go | 18 - .../net/http/otelhttp/handler.go | 236 - .../net/http/otelhttp/labeler.go | 65 - .../net/http/otelhttp/transport.go | 190 - .../net/http/otelhttp/version.go | 26 - .../instrumentation/net/http/otelhttp/wrap.go | 96 - .../go.opentelemetry.io/otel/.gitattributes | 3 - vendor/go.opentelemetry.io/otel/.gitignore | 21 - vendor/go.opentelemetry.io/otel/.gitmodules | 3 - vendor/go.opentelemetry.io/otel/.golangci.yml | 47 - .../otel/.markdown-link.json | 16 - .../otel/.markdownlint.yaml | 29 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 1733 --- vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 - .../go.opentelemetry.io/otel/CONTRIBUTING.md | 521 - vendor/go.opentelemetry.io/otel/LICENSE | 201 - vendor/go.opentelemetry.io/otel/Makefile | 212 - vendor/go.opentelemetry.io/otel/README.md | 102 - vendor/go.opentelemetry.io/otel/RELEASING.md | 132 - vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 - .../go.opentelemetry.io/otel/attribute/doc.go | 16 - .../otel/attribute/encoder.go | 150 - .../otel/attribute/iterator.go | 143 - .../go.opentelemetry.io/otel/attribute/key.go | 134 - .../go.opentelemetry.io/otel/attribute/kv.go | 86 - .../go.opentelemetry.io/otel/attribute/set.go | 435 - .../otel/attribute/type_string.go | 31 - .../otel/attribute/value.go | 271 - .../otel/baggage/baggage.go | 556 - .../otel/baggage/context.go | 39 - .../go.opentelemetry.io/otel/baggage/doc.go | 20 - .../go.opentelemetry.io/otel/codes/codes.go | 106 - vendor/go.opentelemetry.io/otel/codes/doc.go | 21 - vendor/go.opentelemetry.io/otel/doc.go | 34 - .../go.opentelemetry.io/otel/error_handler.go | 38 - .../go.opentelemetry.io/otel/get_main_pkgs.sh | 41 - vendor/go.opentelemetry.io/otel/handler.go | 98 - .../otel/internal/baggage/baggage.go | 43 - .../otel/internal/baggage/context.go | 95 - .../otel/internal/global/internal_logging.go | 63 - .../otel/internal/global/propagator.go | 82 - .../otel/internal/global/state.go | 106 - .../otel/internal/global/trace.go | 192 - .../otel/internal/metric/LICENSE | 201 - .../otel/internal/metric/global/meter.go | 287 - .../otel/internal/metric/global/metric.go | 66 - 
.../otel/internal/metric/registry/doc.go | 24 - .../otel/internal/metric/registry/registry.go | 139 - .../otel/internal/rawhelpers.go | 55 - .../otel/internal_logging.go | 26 - .../go.opentelemetry.io/otel/metric/LICENSE | 201 - .../go.opentelemetry.io/otel/metric/config.go | 128 - vendor/go.opentelemetry.io/otel/metric/doc.go | 67 - .../otel/metric/global/metric.go | 49 - .../go.opentelemetry.io/otel/metric/metric.go | 538 - .../otel/metric/metric_instrument.go | 464 - .../go.opentelemetry.io/otel/metric/noop.go | 30 - .../otel/metric/number/doc.go | 23 - .../otel/metric/number/kind_string.go | 24 - .../otel/metric/number/number.go | 538 - .../otel/metric/sdkapi/descriptor.go | 70 - .../otel/metric/sdkapi/instrumentkind.go | 80 - .../metric/sdkapi/instrumentkind_string.go | 28 - .../otel/metric/sdkapi/noop.go | 66 - .../otel/metric/sdkapi/sdkapi.go | 159 - .../otel/metric/unit/doc.go | 20 - .../otel/metric/unit/unit.go | 24 - .../go.opentelemetry.io/otel/propagation.go | 31 - .../otel/propagation/baggage.go | 58 - .../otel/propagation/doc.go | 24 - .../otel/propagation/propagation.go | 153 - .../otel/propagation/trace_context.go | 159 - .../otel/semconv/v1.7.0/doc.go | 20 - .../otel/semconv/v1.7.0/exception.go | 20 - .../otel/semconv/v1.7.0/http.go | 312 - .../otel/semconv/v1.7.0/resource.go | 946 -- .../otel/semconv/v1.7.0/schema.go | 20 - .../otel/semconv/v1.7.0/trace.go | 1558 -- vendor/go.opentelemetry.io/otel/trace.go | 44 - vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 - .../go.opentelemetry.io/otel/trace/config.go | 316 - .../go.opentelemetry.io/otel/trace/context.go | 61 - vendor/go.opentelemetry.io/otel/trace/doc.go | 66 - .../otel/trace/nonrecording.go | 27 - vendor/go.opentelemetry.io/otel/trace/noop.go | 89 - .../go.opentelemetry.io/otel/trace/trace.go | 519 - .../otel/trace/tracestate.go | 217 - .../otel/verify_examples.sh | 85 - vendor/go.opentelemetry.io/otel/version.go | 20 - vendor/go.opentelemetry.io/otel/versions.yaml | 60 - vendor/modules.txt | 68 - 333 files changed, 801 insertions(+), 44178 deletions(-) create mode 100644 go.work create mode 100644 go.work.sum create mode 100644 swarmd/go.mod create mode 100644 swarmd/go.sum delete mode 100644 vendor/github.com/docker/docker/api/README.md delete mode 100644 vendor/github.com/docker/docker/api/common.go delete mode 100644 vendor/github.com/docker/docker/api/common_unix.go delete mode 100644 vendor/github.com/docker/docker/api/common_windows.go delete mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml delete mode 100644 vendor/github.com/docker/docker/api/swagger.yaml delete mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go delete mode 100644 vendor/github.com/docker/docker/api/types/checkpoint/list.go delete mode 100644 vendor/github.com/docker/docker/api/types/checkpoint/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/client.go delete mode 100644 vendor/github.com/docker/docker/api/types/configs.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/change_type.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/change_types.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go delete mode 100644 
vendor/github.com/docker/docker/api/types/container/errors.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/filesystem_change.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go delete mode 100644 vendor/github.com/docker/docker/api/types/error_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go delete mode 100644 vendor/github.com/docker/docker/api/types/events/events.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/errors.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go delete mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go delete mode 100644 vendor/github.com/docker/docker/api/types/id_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/opts.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go delete mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/ipam.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go delete mode 100644 vendor/github.com/docker/docker/api/types/port.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authconfig.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/stats.go delete mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go delete mode 100644 vendor/github.com/docker/docker/api/types/system/info.go delete mode 100644 vendor/github.com/docker/docker/api/types/system/runtime.go delete mode 100644 vendor/github.com/docker/docker/api/types/system/security_opts.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go delete mode 100644 vendor/github.com/docker/docker/api/types/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/types_deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md delete mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go delete mode 100644 vendor/github.com/docker/docker/client/README.md delete mode 100644 vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/build_prune.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go delete mode 100644 vendor/github.com/docker/docker/client/client.go delete mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go delete mode 100644 vendor/github.com/docker/docker/client/client_unix.go delete mode 100644 vendor/github.com/docker/docker/client/client_windows.go delete mode 100644 vendor/github.com/docker/docker/client/config_create.go delete mode 100644 vendor/github.com/docker/docker/client/config_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/config_list.go delete mode 100644 vendor/github.com/docker/docker/client/config_remove.go delete mode 100644 vendor/github.com/docker/docker/client/config_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_attach.go delete mode 100644 vendor/github.com/docker/docker/client/container_commit.go delete mode 100644 vendor/github.com/docker/docker/client/container_copy.go delete mode 100644 vendor/github.com/docker/docker/client/container_create.go delete mode 100644 vendor/github.com/docker/docker/client/container_diff.go delete mode 100644 vendor/github.com/docker/docker/client/container_exec.go delete mode 100644 vendor/github.com/docker/docker/client/container_export.go delete mode 100644 vendor/github.com/docker/docker/client/container_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/container_kill.go delete mode 100644 vendor/github.com/docker/docker/client/container_list.go delete mode 100644 vendor/github.com/docker/docker/client/container_logs.go delete mode 100644 vendor/github.com/docker/docker/client/container_pause.go 
delete mode 100644 vendor/github.com/docker/docker/client/container_prune.go delete mode 100644 vendor/github.com/docker/docker/client/container_remove.go delete mode 100644 vendor/github.com/docker/docker/client/container_rename.go delete mode 100644 vendor/github.com/docker/docker/client/container_resize.go delete mode 100644 vendor/github.com/docker/docker/client/container_restart.go delete mode 100644 vendor/github.com/docker/docker/client/container_start.go delete mode 100644 vendor/github.com/docker/docker/client/container_stats.go delete mode 100644 vendor/github.com/docker/docker/client/container_stop.go delete mode 100644 vendor/github.com/docker/docker/client/container_top.go delete mode 100644 vendor/github.com/docker/docker/client/container_unpause.go delete mode 100644 vendor/github.com/docker/docker/client/container_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_wait.go delete mode 100644 vendor/github.com/docker/docker/client/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/envvars.go delete mode 100644 vendor/github.com/docker/docker/client/errors.go delete mode 100644 vendor/github.com/docker/docker/client/events.go delete mode 100644 vendor/github.com/docker/docker/client/hijack.go delete mode 100644 vendor/github.com/docker/docker/client/image_build.go delete mode 100644 vendor/github.com/docker/docker/client/image_create.go delete mode 100644 vendor/github.com/docker/docker/client/image_history.go delete mode 100644 vendor/github.com/docker/docker/client/image_import.go delete mode 100644 vendor/github.com/docker/docker/client/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/image_list.go delete mode 100644 vendor/github.com/docker/docker/client/image_load.go delete mode 100644 vendor/github.com/docker/docker/client/image_prune.go delete mode 100644 vendor/github.com/docker/docker/client/image_pull.go delete mode 100644 vendor/github.com/docker/docker/client/image_push.go delete mode 100644 vendor/github.com/docker/docker/client/image_remove.go delete mode 100644 vendor/github.com/docker/docker/client/image_save.go delete mode 100644 vendor/github.com/docker/docker/client/image_search.go delete mode 100644 vendor/github.com/docker/docker/client/image_tag.go delete mode 100644 vendor/github.com/docker/docker/client/info.go delete mode 100644 vendor/github.com/docker/docker/client/interface.go delete mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go delete mode 100644 vendor/github.com/docker/docker/client/interface_stable.go delete mode 100644 vendor/github.com/docker/docker/client/login.go delete mode 100644 vendor/github.com/docker/docker/client/network_connect.go delete mode 100644 vendor/github.com/docker/docker/client/network_create.go delete mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/docker/client/network_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/network_list.go delete mode 100644 vendor/github.com/docker/docker/client/network_prune.go delete mode 100644 vendor/github.com/docker/docker/client/network_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/node_list.go delete mode 100644 vendor/github.com/docker/docker/client/node_remove.go delete mode 100644 
vendor/github.com/docker/docker/client/node_update.go delete mode 100644 vendor/github.com/docker/docker/client/options.go delete mode 100644 vendor/github.com/docker/docker/client/ping.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_create.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_install.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_list.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_push.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_set.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go delete mode 100644 vendor/github.com/docker/docker/client/request.go delete mode 100644 vendor/github.com/docker/docker/client/secret_create.go delete mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/secret_list.go delete mode 100644 vendor/github.com/docker/docker/client/secret_remove.go delete mode 100644 vendor/github.com/docker/docker/client/secret_update.go delete mode 100644 vendor/github.com/docker/docker/client/service_create.go delete mode 100644 vendor/github.com/docker/docker/client/service_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/service_list.go delete mode 100644 vendor/github.com/docker/docker/client/service_logs.go delete mode 100644 vendor/github.com/docker/docker/client/service_remove.go delete mode 100644 vendor/github.com/docker/docker/client/service_update.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_init.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_join.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_update.go delete mode 100644 vendor/github.com/docker/docker/client/task_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/task_list.go delete mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/client/utils.go delete mode 100644 vendor/github.com/docker/docker/client/version.go delete mode 100644 vendor/github.com/docker/docker/client/volume_create.go delete mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/volume_prune.go delete mode 100644 vendor/github.com/docker/docker/client/volume_remove.go delete mode 100644 vendor/github.com/docker/docker/client/volume_update.go delete mode 100644 vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go delete mode 100644 vendor/github.com/docker/docker/internal/multierror/multierror.go delete mode 100644 vendor/github.com/docker/go-connections/nat/nat.go delete mode 100644 vendor/github.com/docker/go-connections/nat/parse.go delete mode 100644 vendor/github.com/docker/go-connections/nat/sort.go delete mode 100644 
vendor/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-units/LICENSE delete mode 100644 vendor/github.com/docker/go-units/MAINTAINERS delete mode 100644 vendor/github.com/docker/go-units/README.md delete mode 100644 vendor/github.com/docker/go-units/circle.yml delete mode 100644 vendor/github.com/docker/go-units/duration.go delete mode 100644 vendor/github.com/docker/go-units/size.go delete mode 100644 vendor/github.com/docker/go-units/ulimit.go delete mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore delete mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml delete mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt delete mode 100644 vendor/github.com/felixge/httpsnoop/Makefile delete mode 100644 vendor/github.com/felixge/httpsnoop/README.md delete mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go delete mode 100644 vendor/github.com/felixge/httpsnoop/docs.go delete mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go delete mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go delete mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go delete mode 100644 vendor/github.com/go-logr/stdr/LICENSE delete mode 100644 vendor/github.com/go-logr/stdr/README.md delete mode 100644 vendor/github.com/go-logr/stdr/stdr.go delete mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go delete mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes delete mode 100644 vendor/go.opentelemetry.io/otel/.gitignore delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules delete mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml delete mode 100644 vendor/go.opentelemetry.io/otel/.markdown-link.json delete mode 100644 
vendor/go.opentelemetry.io/otel/.markdownlint.yaml delete mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md delete mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS delete mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md delete mode 100644 vendor/go.opentelemetry.io/otel/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/Makefile delete mode 100644 vendor/go.opentelemetry.io/otel/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md delete mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go delete mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go delete mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh delete mode 100644 vendor/go.opentelemetry.io/otel/handler.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/global/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric_instrument.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/kind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/number.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go delete mode 100644 
vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/unit/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/unit/unit.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh delete mode 100644 vendor/go.opentelemetry.io/otel/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml diff --git a/go.mod b/go.mod index 0e3bfa3057..1c144c67bc 100644 --- a/go.mod +++ b/go.mod @@ -10,11 +10,10 @@ require ( github.com/container-storage-interface/spec v1.2.0 github.com/distribution/reference v0.5.0 github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible // master (v25.0.0-dev) - github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea + github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c github.com/docker/go-metrics v0.0.1 - github.com/docker/go-units v0.5.0 - github.com/dustin/go-humanize v1.0.0 + github.com/dustin/go-humanize v1.0.0 // indirect github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 @@ -23,15 +22,14 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.6 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.1.0-rc5 github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.14.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/rexray/gocsi v1.2.2 // replaced; see replace rules for the version used. 
github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 go.etcd.io/bbolt v1.3.7 go.etcd.io/etcd/client/pkg/v3 v3.5.6 @@ -64,10 +62,8 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/containerd v1.6.22 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/google/certificate-transparency-go v1.1.4 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -76,8 +72,6 @@ require ( github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 // indirect github.com/jmoiron/sqlx v1.3.3 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/moby/term v0.5.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -86,11 +80,6 @@ require ( github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b // indirect github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc // indirect github.com/zmap/zlint/v3 v3.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect - go.opentelemetry.io/otel v1.4.1 // indirect - go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect - go.opentelemetry.io/otel/metric v0.27.0 // indirect - go.opentelemetry.io/otel/trace v1.4.1 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect diff --git a/go.sum b/go.sum index f7bc706bf1..d76b8b04ff 100644 --- a/go.sum +++ b/go.sum @@ -13,7 +13,6 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c= code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -41,7 +40,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 
h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= @@ -92,8 +90,6 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dperny/gocsi v1.2.3-pre h1:GRTvl8G6yEXYPyul1h6YAqtyxzUHTrQHo6G3xZpb9oM= github.com/dperny/gocsi v1.2.3-pre/go.mod h1:qQw5mIunz1RqMUfZcGJ9/Lt9EDaL0N3wPNYxFTuyLQo= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -107,9 +103,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE= github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -127,11 +120,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -180,7 +170,6 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -199,7 +188,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -278,14 +266,10 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -307,8 +291,6 @@ github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+q github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -433,30 +415,13 @@ go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSW go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= -go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= -go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM= -go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= -go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= -go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= -go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= -go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= -go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= diff --git a/go.work b/go.work new file mode 100644 index 0000000000..8a882135a5 --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . 
+ ./swarmd +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000000..901cdf599d --- /dev/null +++ b/go.work.sum @@ -0,0 +1 @@ +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= diff --git a/swarmd/go.mod b/swarmd/go.mod new file mode 100644 index 0000000000..fdcdebd5a5 --- /dev/null +++ b/swarmd/go.mod @@ -0,0 +1,99 @@ +module github.com/moby/swarmkit/v2/swarmd + +go 1.18 + +require ( + code.cloudfoundry.org/clock v1.1.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cloudflare/cfssl v1.6.4 + github.com/container-storage-interface/spec v1.2.0 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible // master (v25.0.0-dev) + github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/docker/go-units v0.5.0 + github.com/dustin/go-humanize v1.0.0 + github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.3 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/hashicorp/go-memdb v1.3.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.14.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.7.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.8.4 + go.etcd.io/bbolt v1.3.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.6 + go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect + go.etcd.io/etcd/raft/v3 v3.5.6 + go.etcd.io/etcd/server/v3 v3.5.6 + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/time v0.3.0 + + // NOTE(dperny,cyli): there is some error handling, found in the + // (*firstSessionErrorTracker).SessionClosed method in node/node.go, which + // relies on string matching to handle x509 errors. between grpc versions 1.3.0 + // and 1.7.5, the error string we were matching changed, breaking swarmkit. + // In 1.10.x, GRPC stopped surfacing those errors entirely, breaking swarmkit. + // In >=1.11, those errors were brought back but the string had changed again. + // After updating GRPC, if integration test failures occur, verify that the + // string matching there is correct. 
+ // + // For details, see: + // + // - https://github.com/moby/swarmkit/commit/4343384f11737119c3fa1524da2cb2707c70e04a + // - https://github.com/moby/swarmkit/commit/8a2b6fd64944bcef8154ced28f90aeec6abfeb04 + google.golang.org/grpc v1.53.0 +) + +require github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 + +require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/containerd v1.6.22 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/certificate-transparency-go v1.1.4 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 // indirect + github.com/jmoiron/sqlx v1.3.3 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b // indirect + github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc // indirect + github.com/zmap/zlint/v3 v3.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect + go.opentelemetry.io/otel v1.4.1 // indirect + go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect + go.opentelemetry.io/otel/metric v0.27.0 // indirect + go.opentelemetry.io/otel/trace v1.4.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.10.0 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.90.1 // indirect +) + +// Removes etcd dependency +replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre diff --git a/swarmd/go.sum b/swarmd/go.sum new file mode 100644 index 0000000000..f9c1b5e5c4 --- /dev/null +++ b/swarmd/go.sum @@ -0,0 +1,691 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c= +code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= +github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi 
v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO8= +github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmDXacb+1J0= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/containerd v1.6.22 h1:rGTIBxPJusM0evF6wKgIzuD+tV70nmx9eEjzHVm1JzI= +github.com/containerd/containerd v1.6.22/go.mod h1:BQAJdahvGz8xboAvxKg9hsDYIovn79Ea318anowQ1/o= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible h1:sqXunZ6IkpGcR9Kgj8R4MaCXNBZYhlcXxe4BGC8tJAI= +github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea h1:+4n+kUVbPdu6qMI9SUnSKMC+D50gNW4L7Lhk9tI2lVo= +github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dperny/gocsi v1.2.3-pre h1:GRTvl8G6yEXYPyul1h6YAqtyxzUHTrQHo6G3xZpb9oM= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE= +github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
+github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY= +github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod 
h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V7g8= +github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 
h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo= +github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= +github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk= +github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= 
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 h1:mjLf2jYrqtIS4LvLzg0gNyJR4rMXS4X5Bg1A4hOhVMs= +github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261/go.mod h1:oRJU1d0hrkkwCtouwfQGcIAKcVEkclMYoLWocqrg6gI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ymZTxl7CxS73JC99x3ukk+DBkgQGQs= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= 
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b h1:FsyNrX12e5BkplJq7wKOLk0+C6LZ+KGXvuEcKUYm5ss= +github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20210123152837-9cf5beac6d91/go.mod h1:R/deQh6+tSWlgI9tb4jNmXxn8nSCabl5ZQsBX9//I/E= +github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc h1:zkGwegkOW709y0oiAraH/3D8njopUR/pARHv4tZZ6pw= +github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk= +github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0= +github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= +go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0= +go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= +go.etcd.io/etcd/pkg/v3 v3.5.6 h1:k1GZrGrfMHy5/cg2bxNGsmLTFisatyhDYCFLRuaavWg= +go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k= +go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50= +go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= +go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds= +go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod 
h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= +go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= +go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM= +go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= +go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= +go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= +go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= +go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= +go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/docker/docker/api/README.md 
b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index f136c3433a..0000000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. Automatically generate documentation. -2. Automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. - -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. - -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go deleted file mode 100644 index 37e553d418..0000000000 --- a/vendor/github.com/docker/docker/api/common.go +++ /dev/null @@ -1,11 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// Common constants for daemon and client. -const ( - // DefaultVersion of Current REST API - DefaultVersion = "1.44" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. 
- NoBaseImageSpecifier = "scratch" -) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go deleted file mode 100644 index 0f77352d7e..0000000000 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !windows - -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go deleted file mode 100644 index 590ba5479b..0000000000 --- a/vendor/github.com/docker/docker/api/common_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -// Technically the first daemon API version released on Windows is v1.25 in -// engine version 1.13. However, some clients are explicitly using downlevel -// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. -// Hence also allowing 1.24 on Windows. -const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a02737f..0000000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index 960ab1f951..0000000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,12183 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. - -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.44" -info: - title: "Docker Engine API" - version: "1.44" - x-logo: - url: "https://docs.docker.com/assets/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the - Docker client uses to communicate with the Engine, so everything the Docker - client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` - is `GET /containers/json`). The notable exception is running containers, - which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure - of the API call. 
The body of the response will be JSON in the following - format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.44) is used. - For example, calling `/info` is the same as calling `/v1.44/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send - authentication details to various endpoints that need to communicate with - registries, such as `POST /images/(name)/push`. These are sent as - `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) - (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this - structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), - you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. - - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. - See the [networking documentation](https://docs.docker.com/network/) - for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. Refer to the - [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) - for more information. - - To exec a command in a container, you first need to create an exec instance, - then start it. These two API endpoints are wrapped up in a single command-line - command, `docker exec`. 
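The registry-authentication scheme described above (a base64url-encoded JSON blob sent in the `X-Registry-Auth` header) is easy to get subtly wrong. A minimal Go sketch, assuming only the standard library; the endpoint URL, the credentials, and the `registryAuth` type are placeholders for illustration, while the JSON field names are taken from the structure shown above:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// registryAuth mirrors the credential structure documented above.
type registryAuth struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email,omitempty"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	auth := registryAuth{
		Username:      "someuser",
		Password:      "secret",
		ServerAddress: "registry.example.com", // domain/IP without a protocol
	}
	payload, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	// The header value is the base64url-encoded (RFC 4648, section 5) JSON blob.
	encoded := base64.URLEncoding.EncodeToString(payload)

	// Placeholder request against an endpoint that needs registry credentials.
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost/v1.44/images/example-image/push?tag=1.0",
		strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Registry-Auth", encoded)
	fmt.Println(req.Header.Get("X-Registry-Auth"))
}
```

If an identity token was already obtained from the `/auth` endpoint, the same header carries `{"identitytoken": "..."}` instead of the credential fields.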
- - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. Refer to the - [swarm mode documentation](https://docs.docker.com/engine/swarm/) - for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode - must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must - be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit - of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must - be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm - mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - description: "Host IP address that the container's port is mapped to" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: | - MountPoint represents a mount point configuration inside the container. - This is used for reporting the mountpoints in use by a container. - properties: - Type: - description: | - The mount type: - - - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - example: "volume" - Name: - description: | - Name is the name reference to the underlying data defined by `Source` - e.g., the volume name. - type: "string" - example: "myvolume" - Source: - description: | - Source location of the mount. - - For volumes, this contains the storage location of the volume (within - `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - the source (host) part of the bind-mount. For `tmpfs` mount points, this - field is empty. - type: "string" - example: "/var/lib/docker/volumes/myvolume/_data" - Destination: - description: | - Destination is the path relative to the container root (`/`) where - the `Source` is mounted inside the container. - type: "string" - example: "/usr/share/nginx/html/" - Driver: - description: | - Driver is the volume driver used to create the volume (if it is a volume). - type: "string" - example: "local" - Mode: - description: | - Mode is a comma separated list of options supplied by the user when - creating the bind/volume mount. - - The default is platform-specific (`"z"` on Linux, empty on Windows). 
- type: "string" - example: "z" - RW: - description: | - Whether the mount is mounted writable (read-write). - type: "boolean" - example: true - Propagation: - description: | - Propagation describes how mounts are propagated from the host into the - mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) - for details. This field is not used on Windows. - type: "string" - example: "" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - DeviceRequest: - type: "object" - description: "A request for devices to be sent to device drivers" - properties: - Driver: - type: "string" - example: "nvidia" - Count: - type: "integer" - example: -1 - DeviceIDs: - type: "array" - items: - type: "string" - example: - - "0" - - "1" - - "GPU-fef8089b-4820-abfc-e83e-94318197576e" - Capabilities: - description: | - A list of capabilities; an OR list of AND lists of capabilities. - type: "array" - items: - type: "array" - items: - type: "string" - example: - # gpu AND nvidia AND compute - - ["gpu", "nvidia", "compute"] - Options: - description: | - Driver-specific options, specified as a key/value pairs. These options - are passed directly to the driver. - type: "object" - additionalProperties: - type: "string" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - NonRecursive: - description: "Disable recursive bind mount." 
- type: "boolean" - default: false - CreateMountpoint: - description: "Create mount point on host if missing" - type: "boolean" - default: false - ReadOnlyNonRecursive: - description: | - Make the mount non-recursively read-only, but still leave the mount recursive - (unless NonRecursive is set to true in conjunction). - type: "boolean" - default: false - ReadOnlyForceRecursive: - description: "Raise an error if the mount cannot be made recursively read-only." - type: "boolean" - default: false - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to - restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is - added before each restart to prevent flooding the server. - type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `no` Do not automatically restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "no" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: | - If `on-failure` is used, the number of times to retry before giving up. - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: | - An integer value representing this container's relative CPU weight - versus other containers. - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: | - Path to `cgroups` under which the container's `cgroup` is created. If - the path is not absolute, the path is considered to be relative to the - `cgroups` path of the init process. Cgroups are created if they do not - already exist. - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." 
- type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form: - - ``` - [{"Path": "device_path", "Weight": weight}] - ``` - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: | - Microseconds of CPU time that the container can get in a CPU period. - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: | - The length of a CPU real-time period in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: | - The length of a CPU real-time runtime in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpusetCpus: - description: | - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - type: "string" - example: "0-3" - CpusetMems: - description: | - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only - effective on NUMA systems. - type: "string" - Devices: - description: "A list of devices to add to the container." - type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DeviceRequests: - description: | - A list of requests for devices to be sent to device drivers. - type: "array" - items: - $ref: "#/definitions/DeviceRequest" - KernelMemoryTCP: - description: | - Hard limit for kernel TCP buffer memory (in bytes). Depending on the - OCI runtime in use, this option may be ignored. It is no longer supported - by the default (runc) runtime. - - This field is omitted when empty. - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: | - Total memory limit (memory + swap). Set as `-1` to enable unlimited - swap. - type: "integer" - format: "int64" - MemorySwappiness: - description: | - Tune a container's memory swappiness behavior. Accepts an integer - between 0 and 100. - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCpus: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." 
- type: "boolean" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - PidsLimit: - description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` - to not change. - type: "integer" - format: "int64" - x-nullable: true - Ulimits: - description: | - A list of resource limits to set in the container. For example: - - ``` - {"Name": "nofile", "Soft": 1024, "Hard": 2048} - ``` - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: | - Maximum IO in bytes per second for the container system drive - (Windows only). - type: "integer" - format: "int64" - - Limit: - description: | - An object describing a limit on resources which can be requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - Pids: - description: | - Limits the maximum number of PIDs in the container. Set `0` for unlimited. - type: "integer" - format: "int64" - default: 0 - example: 100 - - ResourceObject: - description: | - An object describing the resources which can be advertised by a node and - requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: | - User-defined resources can be either Integer resources (e.g, `SSD=3`) or - String resources (e.g, `GPU=UUID1`). - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. 
Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: | - The time to wait between checks in nanoseconds. It should be 0 or at - least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Timeout: - description: | - The time to wait before considering the check to have hung. It should - be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Retries: - description: | - The number of consecutive failures needed to consider a container as - unhealthy. 0 means inherit. - type: "integer" - StartPeriod: - description: | - Start period for the container to initialize before starting - health-retries countdown in nanoseconds. It should be 0 or at least - 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - StartInterval: - description: | - The time to wait between checks in nanoseconds during the start period. - It should be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - - Health: - description: | - Health stores information about the container's healthcheck results. - type: "object" - x-nullable: true - properties: - Status: - description: | - Status is one of `none`, `starting`, `healthy` or `unhealthy` - - - "none" Indicates there is no healthcheck - - "starting" Starting indicates that the container is not yet ready - - "healthy" Healthy indicates that the container is running correctly - - "unhealthy" Unhealthy indicates that the container has a problem - type: "string" - enum: - - "none" - - "starting" - - "healthy" - - "unhealthy" - example: "healthy" - FailingStreak: - description: "FailingStreak is the number of consecutive failures" - type: "integer" - example: 0 - Log: - type: "array" - description: | - Log contains the last few results (oldest first) - items: - $ref: "#/definitions/HealthcheckResult" - - HealthcheckResult: - description: | - HealthcheckResult stores information about a single run of a healthcheck probe - type: "object" - x-nullable: true - properties: - Start: - description: | - Date and time at which this check started in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "date-time" - example: "2020-01-04T10:44:24.496525531Z" - End: - description: | - Date and time at which this check ended in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2020-01-04T10:45:21.364524523Z" - ExitCode: - description: | - ExitCode meanings: - - - `0` healthy - - `1` unhealthy - - `2` reserved (considered unhealthy) - - other values: error running probe - type: "integer" - example: 0 - Output: - description: "Output from last check" - type: "string" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding - is a string in one of these forms: - - - `host-src:container-dest[:options]` to bind-mount a host path - into the container. Both `host-src`, and `container-dest` must - be an _absolute_ path. 
- - `volume-name:container-dest[:options]` to bind-mount a volume - managed by a volume driver into the container. `container-dest` - must be an _absolute_ path. - - `options` is an optional, comma-delimited list of: - - - `nocopy` disables automatic copying of data from the container - path to the volume. The `nocopy` flag only applies to named volumes. - - `[ro|rw]` mounts a volume read-only or read-write, respectively. - If omitted or set to `rw`, volumes are mounted read-write. - - `[z|Z]` applies SELinux labels to allow or deny multiple containers - to read and write to the same volume. - - `z`: a _shared_ content label is applied to the content. This - label indicates that multiple containers can share the volume - content, for both reading and writing. - - `Z`: a _private unshared_ label is applied to the content. - This label indicates that only the current container can use - a private volume. Labeling systems such as SELinux require - proper labels to be placed on volume content that is mounted - into a container. Without a label, the security system can - prevent a container's processes from using the content. By - default, the labels set by the host operating system are not - modified. - - `[[r]shared|[r]slave|[r]private]` specifies mount - [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). - This only applies to bind-mounted volumes, not internal volumes - or named volumes. Mount propagation requires the source mount - point (the location where the source directory is mounted in the - host operating system) to have the correct propagation properties. - For shared volumes, the source mount point must be set to `shared`. - For slave volumes, the mount must be set to either `shared` or - `slave`. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: | - Network mode to use for this container. Supported standard values - are: `bridge`, `host`, `none`, and `container:`. Any - other value is taken as a custom network's name to which this - container should connect to. - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: | - Automatically remove the container when the container's process - exits. This has no effect if `RestartPolicy` is set. - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: | - A list of volumes to inherit from another container, specified in - the form `[:]`. - items: - type: "string" - Mounts: - description: | - Specification for mounts to be added to the container. - type: "array" - items: - $ref: "#/definitions/Mount" - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - Annotations: - type: "object" - description: | - Arbitrary non-identifying metadata attached to container and - provided to the runtime when the container is started. 
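The `Binds` entries described above are plain strings in the `host-src:container-dest[:options]` form. A minimal sketch of assembling one such entry in Go; the helper name and the paths are invented for illustration, while the option keywords (`ro`, `rw`, `z`, `Z`, and the `[r]shared`/`[r]slave`/`[r]private` propagation modes) come from the list above:

```go
package main

import (
	"fmt"
	"strings"
)

// makeBind assembles a single HostConfig.Binds entry of the form
// host-src:container-dest[:options].
func makeBind(hostSrc, containerDest string, options ...string) string {
	parts := []string{hostSrc, containerDest}
	if len(options) > 0 {
		// options is a comma-delimited list, e.g. "ro,z".
		parts = append(parts, strings.Join(options, ","))
	}
	return strings.Join(parts, ":")
}

func main() {
	// A read-only bind mount with a shared SELinux content label.
	fmt.Println(makeBind("/srv/www", "/usr/share/nginx/html", "ro", "z"))
	// Prints: /srv/www:/usr/share/nginx/html:ro,z
}
```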
- additionalProperties: - type: "string" - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: | - A list of kernel capabilities to add to the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CapDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CgroupnsMode: - type: "string" - enum: - - "private" - - "host" - description: | - cgroup namespace mode for the container. Possible values are: - - - `"private"`: the container runs in its own private cgroup namespace - - `"host"`: use the host system's cgroup namespace - - If not specified, the daemon default is used, which can either be `"private"` - or `"host"`, depending on daemon version, kernel support and configuration. - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` - file. Specified in the form `["hostname:IP"]`. - items: - type: "string" - GroupAdd: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: | - A list of links for the container in the form `container_name:alias`. - items: - type: "string" - OomScoreAdj: - type: "integer" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be - either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when - the container starts. The allocated port might be changed when - restarting the container. - - The port is selected from the ephemeral port range that depends on - the kernel. For example, on Linux the range is defined by - `/proc/sys/net/ipv4/ip_local_port_range`. - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: | - A list of string values to customize labels for MLS systems, such - as SELinux. 
- items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs - mounts, and their corresponding mount options. For example: - - ``` - { "/run": "rw,noexec,nosuid,size=65536k" } - ``` - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: | - Sets the usernamespace mode for the container when usernamespace - remapping option is enabled. - ShmSize: - type: "integer" - format: "int64" - description: | - Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. - For example: - - ``` - {"net.ipv4.ip_forward": "1"} - ``` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - Isolation: - type: "string" - description: | - Isolation technology of the container. (Windows only) - enum: - - "default" - - "process" - - "hyperv" - MaskedPaths: - type: "array" - description: | - The list of paths to be masked inside the container (this overrides - the default set of paths). - items: - type: "string" - ReadonlyPaths: - type: "array" - description: | - The list of paths to be set as read-only inside the container - (this overrides the default set of paths). - items: - type: "string" - - ContainerConfig: - description: | - Configuration for a container that is portable between hosts. - - When used as `ContainerConfig` field in an image, `ContainerConfig` is an - optional field containing the configuration of the container that was last - committed when creating the image. - - Previous versions of Docker builder used this field to store build cache, - and it is not in active use anymore. - type: "object" - properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - type: "string" - example: "439f4e91bd1d" - Domainname: - description: | - The domain name to use for the container. - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." - type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. 
- type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. - type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - type: "string" - example: "example-image:1.0" - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - x-nullable: true - MacAddress: - description: "MAC address of the container." - type: "string" - x-nullable: true - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - - NetworkingConfig: - description: | - NetworkingConfig represents the container's networking configuration for - each of its interfaces. - It is used for the networking configs specified in the `docker create` - and `docker network connect` commands. - type: "object" - properties: - EndpointsConfig: - description: | - A mapping of network name to endpoint configuration for that network. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because containers/create currently - # does not support attaching to multiple networks, so the example request - # would be confusing if it showed that multiple networks can be contained - # in the EndpointsConfig. 
- # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: Name of the network's bridge (for example, `docker0`). - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. - type: "string" - example: "fe80::42:acff:fe11:1" - LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. - type: "integer" - example: "64" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey identifies the sandbox - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - # TODO is SecondaryIPAddresses actually used? - SecondaryIPAddresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO is SecondaryIPv6Addresses actually used? - SecondaryIPv6Addresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - x-nullable: true - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - GraphDriverData: - description: | - Information about the storage driver used to store the container's and - image's filesystem. - type: "object" - required: [Name, Data] - properties: - Name: - description: "Name of the storage driver." - type: "string" - x-nullable: false - example: "overlay2" - Data: - description: | - Low-level storage metadata, provided as key/value pairs. - - This information is driver-specific, and depends on the storage-driver - in use, and should be used for informational purposes only. - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: { - "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", - "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", - "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" - } - - FilesystemChange: - description: | - Change in the container's filesystem. - type: "object" - required: [Path, Kind] - properties: - Path: - description: | - Path to file or directory that has changed. - type: "string" - x-nullable: false - Kind: - $ref: "#/definitions/ChangeType" - - ChangeType: - description: | - Kind of change - - Can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - - ImageInspect: - description: | - Information about an image in the local image cache. - type: "object" - properties: - Id: - description: | - ID is the content-addressable ID of an image. 
- - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. - type: "array" - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Parent: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - Comment: - description: | - Optional message that was set when committing or importing the image. - type: "string" - x-nullable: false - example: "" - Created: - description: | - Date and time at which the image was created, formatted in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - x-nullable: false - example: "2022-02-04T21:20:12.497794809Z" - Container: - description: | - The ID of the container that was used to create the image. - - Depending on how the image was created, this field may be empty. - type: "string" - x-nullable: false - example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" - ContainerConfig: - $ref: "#/definitions/ContainerConfig" - DockerVersion: - description: | - The version of Docker that was used to build the image. - - Depending on how the image was created, this field may be empty. - type: "string" - x-nullable: false - example: "20.10.7" - Author: - description: | - Name of the author that was specified when committing the image, or as - specified through MAINTAINER (deprecated) in the Dockerfile. - type: "string" - x-nullable: false - example: "" - Config: - $ref: "#/definitions/ContainerConfig" - Architecture: - description: | - Hardware CPU architecture that the image runs on. - type: "string" - x-nullable: false - example: "arm" - Variant: - description: | - CPU architecture variant (presently ARM-only). - type: "string" - x-nullable: true - example: "v7" - Os: - description: | - Operating System the image is built to run on. 
- type: "string" - x-nullable: false - example: "linux" - OsVersion: - description: | - Operating System version the image is built to run on (especially - for Windows). - type: "string" - example: "" - x-nullable: true - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - description: | - Information about the image's RootFS, including the layer IDs. - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - example: "layers" - Layers: - type: "array" - items: - type: "string" - example: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - Metadata: - description: | - Additional metadata of the image in the local cache. This information - is local to the daemon, and not part of the image itself. - type: "object" - properties: - LastTagTime: - description: | - Date and time at which the image was last tagged in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if the image was tagged locally, - and omitted otherwise. - type: "string" - format: "dateTime" - example: "2022-02-28T14:40:02.623929178Z" - x-nullable: true - ImageSummary: - type: "object" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - Labels - - Containers - properties: - Id: - description: | - ID is the content-addressable ID of an image. - - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - ParentId: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - x-nullable: false - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. 
- type: "array" - x-nullable: false - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Created: - description: | - Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). - type: "integer" - x-nullable: false - example: "1644009612" - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 172064416 - SharedSize: - description: | - Total size of image layers that are shared between this image and other - images. - - This size is not calculated by default. `-1` indicates that the value - has not been set / calculated. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 - Labels: - description: "User-defined key/value metadata." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Containers: - description: | - Number of containers using this image. Includes both stopped and running - containers. - - This size is not calculated by default, and depends on which API endpoint - is used. `-1` indicates that the value has not been set / calculated. - x-nullable: false - type: "integer" - example: 2 - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - example: "tardis" - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - example: "custom" - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - example: "/var/lib/docker/volumes/tardis" - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - example: "2016-06-07T20:31:11.853781916Z" - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. - - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - example: - hello: "world" - Labels: - type: "object" - description: "User-defined key/value metadata." 
- x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: - type: "string" - description: | - The level at which the volume exists. Either `global` for cluster-wide, - or `local` for machine level. - default: "local" - x-nullable: false - enum: ["local", "global"] - example: "local" - ClusterVolume: - $ref: "#/definitions/ClusterVolume" - Options: - type: "object" - description: | - The driver specific options used when creating the volume. - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - UsageData: - type: "object" - x-nullable: true - x-go-name: "UsageData" - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - format: "int64" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - format: "int64" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - VolumeCreateOptions: - description: "Volume configuration" - type: "object" - title: "VolumeConfig" - x-go-name: "CreateOptions" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - example: "tardis" - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - example: "custom" - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - ClusterVolumeSpec: - $ref: "#/definitions/ClusterVolumeSpec" - - VolumeListResponse: - type: "object" - title: "VolumeListResponse" - x-go-name: "ListResponse" - description: "Volume list response" - properties: - Volumes: - type: "array" - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - description: | - Warnings that occurred when fetching the list of volumes. 
- items: - type: "string" - example: [] - - Network: - type: "object" - properties: - Name: - type: "string" - Id: - type: "string" - Created: - type: "string" - format: "dateTime" - Scope: - type: "string" - Driver: - type: "string" - EnableIPv6: - type: "boolean" - IPAM: - $ref: "#/definitions/IPAM" - Internal: - type: "boolean" - Attachable: - type: "boolean" - Ingress: - type: "boolean" - Containers: - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - Options: - type: "object" - additionalProperties: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." - type: "string" - default: "default" - Config: - description: | - List of IPAM configuration options, specified as a map: - - ``` - {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } - ``` - type: "array" - items: - $ref: "#/definitions/IPAMConfig" - Options: - description: "Driver-specific options, specified as a map." - type: "object" - additionalProperties: - type: "string" - - IPAMConfig: - type: "object" - properties: - Subnet: - type: "string" - IPRange: - type: "string" - Gateway: - type: "string" - AuxiliaryAddresses: - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - EndpointID: - type: "string" - MacAddress: - type: "string" - IPv4Address: - type: "string" - IPv6Address: - type: "string" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - BuildCache: - type: "object" - description: | - BuildCache contains information about a build cache record. - properties: - ID: - type: "string" - description: | - Unique ID of the build cache record. - example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" - Parents: - description: | - List of parent build cache record IDs. - type: "array" - items: - type: "string" - x-nullable: true - example: ["hw53o5aio51xtltp5xjp8v7fx"] - Type: - type: "string" - description: | - Cache record type. 
- example: "regular" - # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 - enum: - - "internal" - - "frontend" - - "source.local" - - "source.git.checkout" - - "exec.cachemount" - - "regular" - Description: - type: "string" - description: | - Description of the build-step that produced the build cache. - example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: - type: "boolean" - description: | - Indicates if the build cache is in use. - example: false - Shared: - type: "boolean" - description: | - Indicates if the build cache is shared. - example: true - Size: - description: | - Amount of disk space used by the build cache (in bytes). - type: "integer" - example: 51 - CreatedAt: - description: | - Date and time at which the build cache was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - LastUsedAt: - description: | - Date and time at which the build cache was last used in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - x-nullable: true - example: "2017-08-09T07:09:37.632105588Z" - UsageCount: - type: "integer" - example: 26 - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. 
- type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. - type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - type: "integer" - format: "int64" - example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - PluginPrivilege: - description: | - Describes a permission the user has to accept upon installing - the plugin. 
- type: "object" - x-go-name: "PluginPrivilege" - properties: - Name: - type: "string" - example: "network" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - "host" - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: - True if the plugin is running. False if the plugin is not running, - only installed. - type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." - type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - ProtocolScheme: - type: "string" - example: "some.protocol/v1.0" - description: "Protocol to use for clients connecting to the plugin." 
- enum: - - "" - - "moby.plugins.http/v1" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed - to avoid conflicting writes. The client must send the version number along - with the modified specification when updating these objects. - - This approach ensures safe concurrency and determinism in that the change - on the object may not be applied if the version number has changed from the - last read. In other words, if two update requests specify the same base - version, only one of the requests can succeed. As a result, two separate - update requests that happen at the same time will not unintentionally - overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." 
- type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." - type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "logentries" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: | - Information about the issuer of leaf TLS certificates and the trusted root - CA certificate. - type: "object" - properties: - TrustRoot: - description: | - The root CA certificate(s) that are used to validate leaf TLS - certificates. - type: "string" - CertIssuerSubject: - description: - The base64-url-safe-encoded raw subject bytes of the issuer. - type: "string" - CertIssuerPublicKey: - description: | - The base64-url-safe-encoded raw public key bytes of the issuer. 
- type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." - type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: | - The number of historic tasks to keep per instance or node. If - negative, never remove completed or failed tasks. - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: | - The number of snapshots to keep beyond the current snapshot. - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: | - The number of log entries to keep around to sync up slow followers - after a snapshot is created. - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from - the leader before becoming a candidate and starting an election. - `ElectionTick` must be greater than `HeartbeatTick`. 
- - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, - the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: | - The delay for an agent to send a heartbeat to the dispatcher. - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: | - Configuration for forwarding signing requests to an external - certificate authority. - type: "array" - items: - type: "object" - properties: - Protocol: - description: | - Protocol for communication with the external CA (currently - only `cfssl` is supported). - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: | - URL where certificate signing requests should be sent. - type: "string" - Options: - description: | - An object with key/value pairs that are interpreted as - protocol-specific options for the external CA driver. - type: "object" - additionalProperties: - type: "string" - CACert: - description: | - The root CA certificate (in PEM format) this external CA uses - to issue TLS certificates (assumed to be to the current swarm - root CA certificate if not provided). - type: "string" - SigningCACert: - description: | - The desired signing CA certificate for all swarm node TLS leaf - certificates, in PEM format. - type: "string" - SigningCAKey: - description: | - The desired signing CA key for all swarm node TLS leaf certificates, - in PEM format. - type: "string" - ForceRotate: - description: | - An integer whose purpose is to force swarm to generate a new - signing CA certificate and key, if none have been specified in - `SigningCACert` and `SigningCAKey` - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: | - If set, generate a key and use it to lock data stored on the - managers. - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selectd log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. 
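As a rough illustration of the shape the `SwarmSpec` definition above describes, here is a minimal hand-rolled Go sketch that builds a subset of such a spec and marshals it to JSON. The struct and field names mirror the schema above but are assumptions for this sketch, not the canonical types from `github.com/docker/docker/api/types/swarm`.

```go
// Illustrative only: hand-rolled structs mirroring a small part of the
// SwarmSpec definition above. Types and omitempty choices are assumptions.
package main

import (
	"encoding/json"
	"fmt"
)

type OrchestrationConfig struct {
	TaskHistoryRetentionLimit int64 `json:"TaskHistoryRetentionLimit,omitempty"`
}

type RaftConfig struct {
	SnapshotInterval uint64 `json:"SnapshotInterval,omitempty"`
	ElectionTick     int    `json:"ElectionTick,omitempty"`
	HeartbeatTick    int    `json:"HeartbeatTick,omitempty"`
}

type SwarmSpec struct {
	Name          string               `json:"Name,omitempty"`
	Labels        map[string]string    `json:"Labels,omitempty"`
	Orchestration *OrchestrationConfig `json:"Orchestration,omitempty"`
	Raft          *RaftConfig          `json:"Raft,omitempty"`
}

func main() {
	spec := SwarmSpec{
		Name:          "default",
		Labels:        map[string]string{"com.example.corp.type": "production"},
		Orchestration: &OrchestrationConfig{TaskHistoryRetentionLimit: 10},
		Raft:          &RaftConfig{SnapshotInterval: 10000, ElectionTick: 3, HeartbeatTick: 1},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	// Prints a JSON body whose keys line up with the SwarmSpec properties above.
	fmt.Println(string(out))
}
```

A real client would typically send a body of this shape to the swarm update endpoint together with the current `ObjectVersion` index, so that conflicting concurrent updates are rejected rather than silently overwriting each other.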
- ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: | - Whether there is currently a root CA rotation in progress for the swarm - type: "boolean" - example: false - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - If no port is set or is set to 0, the default port (4789) is used. - type: "integer" - format: "uint32" - default: 4789 - example: 4789 - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope - networks. - type: "array" - items: - type: "string" - format: "CIDR" - example: ["10.10.0.0/16", "20.20.0.0/16"] - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the - default subnet pool. - type: "integer" - format: "uint32" - maximum: 29 - default: 24 - example: 24 - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: | - Plugin spec for the service. *(Experimental release only.)* - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." - type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - ContainerSpec: - type: "object" - description: | - Container spec for the service. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: | - The hostname to use for the container, as a valid - [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. - type: "string" - Env: - description: | - A list of environment variables in the form `VAR=value`. - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - Config: - type: "string" - example: "0bt9dmxjvjiqermk6xrop3ekq" - description: | - Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs - field with the Runtime property set. - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - File: - type: "string" - example: "spec.json" - description: | - Load credential spec from this file. The file is read by - the daemon, and must be present in the `CredentialSpecs` - subdirectory in the docker data directory, which defaults - to `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads - `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -
- - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - Registry: - type: "string" - description: | - Load credential spec from this value in the Windows - registry. The specified registry value must be located in: - - `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: | - Specification for mounts to be added to containers created as part - of the service. - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: | - Amount of time to wait for the container to terminate before - forcefully killing it. - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: | - Specification for DNS related configurations in resolver configuration - file (`resolv.conf`). - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: | - A list of internal resolver variables to be modified (e.g., - `debug`, `ndots:3`, etc.). - type: "array" - items: - type: "string" - Secrets: - description: | - Secrets contains references to zero or more secrets that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: | - SecretID represents the ID of the specific secret that we're - referencing. - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, - but this is just provided for lookup/display purposes. The - secret in the reference will be identified by its ID. - type: "string" - Configs: - description: | - Configs contains references to zero or more configs that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - Runtime: - description: | - Runtime represents a target that is not mounted into the - container but is used by the task - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually - > exclusive - type: "object" - ConfigID: - description: | - ConfigID represents the ID of the specific config that we're - referencing. - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, - but this is just provided for lookup/display purposes. The - config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: | - Isolation technology of the containers running the service. - (Windows only) - enum: - - "default" - - "process" - - "hyperv" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - Sysctls: - description: | - Set kernel namedspaced parameters (sysctls) in the container. - The Sysctls option on services accepts the same sysctls as the - are supported on containers. Note that while the same sysctls are - supported, no guarantees or checks are made about their - suitability for a clustered environment, and it's up to the user - to determine whether a given sysctl will work properly in a - Service. - type: "object" - additionalProperties: - type: "string" - # This option is not used by Windows containers - CapabilityAdd: - type: "array" - description: | - A list of kernel capabilities to add to the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - - "CAP_SYS_ADMIN" - - "CAP_SYS_CHROOT" - - "CAP_SYSLOG" - CapabilityDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - NetworkAttachmentSpec: - description: | - Read-only spec type for non-swarm containers attached to swarm overlay - networks. - -


- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - type: "object" - properties: - ContainerID: - description: "ID of the container represented by this task" - type: "string" - Resources: - description: | - Resource requirements which apply to each individual container created - as part of the service. - type: "object" - properties: - Limits: - description: "Define resource limits." - $ref: "#/definitions/Limit" - Reservations: - description: "Define resource reservations." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: | - Specification for the restart policy which applies to containers - created as part of this service. - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: | - Maximum attempts to restart a given container before giving up - (default value is 0, which is ignored). - type: "integer" - format: "int64" - default: 0 - Window: - description: | - Window is the time window used to evaluate the restart policy - (default value is 0, which is unbounded). - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: | - An array of constraint expressions to limit the set of nodes where - a task can be scheduled. Constraint expressions can either use a - _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find - nodes that satisfy every expression (AND match). Constraints can - match node or Docker Engine labels as follows: - - node attribute       | matches                        | example - ---------------------|--------------------------------|----------------------------------------------- - `node.id`            | Node ID                        | `node.id==2ivku8v2gvtg4` - `node.hostname`      | Node hostname                  | `node.hostname!=node-2` - `node.role`          | Node role (`manager`/`worker`) | `node.role==manager` - `node.platform.os`   | Node operating system          | `node.platform.os==windows` - `node.platform.arch` | Node architecture              | `node.platform.arch==x86_64` - `node.labels`        | User-defined node labels       | `node.labels.security==high` - `engine.labels`      | Docker Engine's labels         | `engine.labels.operatingsystem==ubuntu-14.04` - - `engine.labels` apply to Docker Engine labels like operating system, - drivers, etc. Swarm administrators add `node.labels` for operational - purposes by using the [`node update endpoint`](#operation/NodeUpdate). - - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - - "node.platform.os==linux" - - "node.platform.arch==x86_64" - Preferences: - description: | - Preferences provide a way to make the scheduler aware of factors - such as topology. They are provided in order from highest to - lowest precedence. - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: | - label descriptor, such as `engine.labels.az`.
- type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - MaxReplicas: - description: | - Maximum number of replicas for per node (default value is 0, which - is unlimited) - type: "integer" - format: "int64" - default: 0 - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: | - A counter that triggers an update even if no relevant parameters have - been changed. - type: "integer" - Runtime: - description: | - Runtime is the type of runtime specified for the task executor. - type: "string" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - LogDriver: - description: | - Specifies the log driver to use for tasks created from this spec. If - not present, the default one for the swarm will be used, finally - falling back to the engine default if not specified. - type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - DesiredState: - $ref: "#/definitions/TaskState" - JobIteration: - description: | - If the Service this Task belongs to is a job-mode service, contains - the JobIteration of the Service this Task was created for. Absent if - the Task was created for a Replicated or Global Service. 
- $ref: "#/definitions/ObjectVersion" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - type: object - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - ReplicatedJob: - description: | - The mode used for services with a finite number of tasks that run - to a completed state. - type: "object" - properties: - MaxConcurrent: - description: | - The maximum number of replicas to run simultaneously. - type: "integer" - format: "int64" - default: 1 - TotalCompletions: - description: | - The total number of replicas desired to reach the Completed - state. If unset, will default to the value of `MaxConcurrent` - type: "integer" - format: "int64" - GlobalJob: - description: | - The mode used for services which run a task to the completed state - on each valid node. - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be updated in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an updated task fails to run, or stops running - during the update. - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: | - Amount of time to monitor each updated task for failures, in - nanoseconds. 
- type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during an update before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling out an updated task. Either - the old task is shut down before the new task is started, or the - new task is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be rolled back in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: | - Amount of time between rollback iterations, in nanoseconds. - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an rolled back task fails to run, or stops - running during the rollback. - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: | - Amount of time to monitor each rolled back task for failures, in - nanoseconds. - type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during a rollback before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling back a task. Either the old - task is shut down before the new task is started, or the new task - is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -


- - - "ingress" makes the target port accessible on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: | - The mode of resolution to use for internal load balancing between tasks. - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: | - List of exposed ports that this service is accessible on from the - outside. Ports can only be provided if `vip` resolution mode is used. - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - ServiceStatus: - description: | - The status of the service's tasks. Provided only when requested as - part of a ServiceList operation. - type: "object" - properties: - RunningTasks: - description: | - The number of tasks for the service currently in the Running state. - type: "integer" - format: "uint64" - example: 7 - DesiredTasks: - description: | - The number of tasks for the service desired to be running. - For replicated services, this is the replica count from the - service spec. For global services, this is computed by taking - count of all tasks for the service with a Desired State other - than Shutdown. - type: "integer" - format: "uint64" - example: 10 - CompletedTasks: - description: | - The number of tasks for a job that are in the Completed state. - This field must be cross-referenced with the service type, as the - value of 0 may mean the service is not in a job mode, or it may - mean the job-mode service has no tasks yet Completed. - type: "integer" - format: "uint64" - JobStatus: - description: | - The status of the service when it is in one of ReplicatedJob or - GlobalJob modes. Absent on Replicated and Global mode services. The - JobIteration is an ObjectVersion, but unlike the Service's version, - does not need to be sent with an update request. - type: "object" - properties: - JobIteration: - description: | - JobIteration is a value increased each time a Job is executed, - successfully or otherwise. "Executed", in this case, means the - job as a whole has been started, not that an individual Task has - been launched. A job is "Executed" when its ServiceSpec is - updated. JobIteration can be used to disambiguate Tasks belonging - to different executions of a job. 
Though JobIteration will - increase with each subsequent execution, it may not necessarily - increase by 1, and so JobIteration should not be used to - $ref: "#/definitions/ObjectVersion" - LastExecution: - description: | - The last time, as observed by the server, that this job was - started. - type: "string" - format: "dateTime" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerSummary: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: | - Name of the secrets driver used to fetch the secret's value from an - external secret store. - $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - ContainerState: - description: | - ContainerState stores container's running state. It's part of ContainerJSONBase - and will be returned by the "inspect" command. - type: "object" - x-nullable: true - properties: - Status: - description: | - String representation of the container state. Can be one of "created", - "running", "paused", "restarting", "removing", "exited", or "dead". 
- type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - example: "running" - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the freezer cgroup is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - example: true - Paused: - description: "Whether this container is paused." - type: "boolean" - example: false - Restarting: - description: "Whether this container is restarting." - type: "boolean" - example: false - OOMKilled: - description: | - Whether a process within this container has been killed because it ran - out of memory since the container was last started. - type: "boolean" - example: false - Dead: - type: "boolean" - example: false - Pid: - description: "The process ID of this container" - type: "integer" - example: 1234 - ExitCode: - description: "The last exit code of this container" - type: "integer" - example: 0 - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - example: "2020-01-06T09:06:59.461876391Z" - FinishedAt: - description: "The time when this container last exited." - type: "string" - example: "2020-01-06T09:07:59.461876391Z" - Health: - $ref: "#/definitions/Health" - - ContainerCreateResponse: - description: "OK response to ContainerCreate operation" - type: "object" - title: "ContainerCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - example: [] - - ContainerWaitResponse: - description: "OK response to ContainerWait operation" - type: "object" - x-go-name: "WaitResponse" - title: "ContainerWaitResponse" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - format: "int64" - x-nullable: false - Error: - $ref: "#/definitions/ContainerWaitExitError" - - ContainerWaitExitError: - description: "container waiting error, if any" - type: "object" - x-go-name: "WaitExitError" - properties: - Message: - description: "Details of an error" - type: "string" - - SystemVersion: - type: "object" - description: | - Response of Engine API: GET "/version" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - description: | - Information about system components - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - description: | - Name of the component - type: "string" - example: "Engine" - Version: - description: | - Version of the component - type: "string" - x-nullable: false - example: "19.03.12" - Details: - description: | - Key/value pairs of strings with additional information about the - component. These values are intended for informational purposes - only, and their content is not defined, and not part of the API - specification. 
- - These messages can be printed by the client as information to the user. - type: "object" - x-nullable: true - Version: - description: "The version of the daemon" - type: "string" - example: "19.03.12" - ApiVersion: - description: | - The default (and highest) API version that is supported by the daemon - type: "string" - example: "1.40" - MinAPIVersion: - description: | - The minimum API version that is supported by the daemon - type: "string" - example: "1.12" - GitCommit: - description: | - The Git commit of the source code that was used to build the daemon - type: "string" - example: "48a66213fe" - GoVersion: - description: | - The version Go used to compile the daemon, and the version of the Go - runtime in use. - type: "string" - example: "go1.13.14" - Os: - description: | - The operating system that the daemon is running on ("linux" or "windows") - type: "string" - example: "linux" - Arch: - description: | - The architecture that the daemon is running on - type: "string" - example: "amd64" - KernelVersion: - description: | - The kernel version (`uname -r`) that the daemon is running on. - - This field is omitted when empty. - type: "string" - example: "4.19.76-linuxkit" - Experimental: - description: | - Indicates if the daemon is started with experimental features enabled. - - This field is omitted when empty / false. - type: "boolean" - example: true - BuildTime: - description: | - The date and time that the daemon was compiled. - type: "string" - example: "2020-06-22T15:49:27.000000000+00:00" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -


- - > **Note**: The format of the ID itself is not part of the API, and - > should not be considered stable. - type: "string" - example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - Containers: - description: "Total number of containers on the host." - type: "integer" - example: 14 - ContainersRunning: - description: | - Number of containers with status `"running"`. - type: "integer" - example: 3 - ContainersPaused: - description: | - Number of containers with status `"paused"`. - type: "integer" - example: 1 - ContainersStopped: - description: | - Number of containers with status `"stopped"`. - type: "integer" - example: 10 - Images: - description: | - Total number of images on the host. - - Both _tagged_ and _untagged_ (dangling) images are counted. - type: "integer" - example: 508 - Driver: - description: "Name of the storage driver in use." - type: "string" - example: "overlay2" - DriverStatus: - description: | - Information specific to the storage driver, provided as - "label" / "value" pairs. - - This information is provided by the storage driver, and formatted - in a way consistent with the output of `docker info` on the command - line. - -


- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Backing Filesystem", "extfs"] - - ["Supports d_type", "true"] - - ["Native Overlay Diff", "true"] - DockerRootDir: - description: | - Root directory of persistent Docker state. - - Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` - on Windows. - type: "string" - example: "/var/lib/docker" - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemoryTCP: - description: | - Indicates if the host has kernel memory TCP limit support enabled. This - field is omitted if not supported. - - Kernel memory TCP limits are not supported when using cgroups v2, which - does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. - type: "boolean" - example: true - CpuCfsPeriod: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) period is supported by - the host. - type: "boolean" - example: true - CpuCfsQuota: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by - the host. - type: "boolean" - example: true - CPUShares: - description: | - Indicates if CPU Shares limiting is supported by the host. - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - PidsLimit: - description: "Indicates if the host kernel has PID limit support enabled." - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." - type: "boolean" - example: true - BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." - type: "boolean" - example: true - Debug: - description: | - Indicates if the daemon is running in debug-mode / with debug-level - logging enabled. - type: "boolean" - example: true - NFd: - description: | - The total number of file Descriptors in use by the daemon process. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd", "none"] - default: "cgroupfs" - example: "cgroupfs" - CgroupVersion: - description: | - The version of the cgroup. 
- type: "string" - enum: ["1", "2"] - default: "1" - example: "1" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "4.9.38-moby" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Alpine Linux v3.5" - OSVersion: - description: | - Version of the host's operating system - -


- - > **Note**: The information returned in this field, including its - > very existence, and the formatting of values, should not be considered - > stable, and may change without notice. - type: "string" - example: "16.04" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in bytes. - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon. This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -


- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - type: "string" - example: "24.0.2" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "runc" - example: - runc: - path: "runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shutdown - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - InitBinary: - description: | - Name and, optional, path of the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, user-namespaces (userns), rootless and - no-new-privileges. - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - - "name=rootless" - ProductLicense: - description: | - Reports a summary of the product license on the daemon. - - If a commercial license has been applied to the daemon, information - such as number of nodes, and expiration are included. 
- type: "string" - example: "Community Engine" - DefaultAddressPools: - description: | - List of custom default address pools for local networks, which can be - specified in the daemon.json file or dockerd option. - - Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 - 10.10.[0-255].0/24 address pools. - type: "array" - items: - type: "object" - properties: - Base: - description: "The network address in CIDR format" - type: "string" - example: "10.10.0.0/16" - Size: - description: "The network pool size" - type: "integer" - example: "24" - Warnings: - description: | - List of warnings / informational messages about missing features, or - issues related to the daemon configuration. - - These messages can be printed by the client as information to the user. - type: "array" - items: - type: "string" - example: - - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" - CDISpecDirs: - description: | - List of directories where (Container Device Interface) CDI - specifications are located. - - These specifications define vendor-specific modifications to an OCI - runtime specification for a container being created. - - An empty list indicates that CDI device injection is disabled. - - Note that since using CDI device injection requires the daemon to have - experimental enabled. For non-experimental daemons an empty list will - always be returned. - type: "array" - items: - type: "string" - example: - - "/etc/cdi" - - "/var/run/cdi" - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -


- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and volume-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration overrides this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration overrides this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts.
- type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. 
- type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - - NetworkAttachmentConfig: - description: | - Specifies how a service should be attached to a particular network. - type: "object" - properties: - Target: - description: | - The target network for attachment. Must be a network name or ID. 
- type: "string" - Aliases: - description: | - Discoverable alternate names for the service on this network. - type: "array" - items: - type: "string" - DriverOpts: - description: | - Driver attachment options for the network target. - type: "object" - additionalProperties: - type: "string" - - EventActor: - description: | - Actor describes something that generates events, like a container, network, - or a volume. - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - description: | - Various key/value attributes of the object, depending on its type. - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-label-value" - image: "alpine:latest" - name: "my-container" - - EventMessage: - description: | - EventMessage represents the information an event contains. - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] - example: "container" - Action: - description: "The type of event" - type: "string" - example: "create" - Actor: - $ref: "#/definitions/EventActor" - scope: - description: | - Scope of the event. Engine events are `local` scope. Cluster (Swarm) - events are `swarm` scope. - type: "string" - enum: ["local", "swarm"] - time: - description: "Timestamp of event" - type: "integer" - format: "int64" - example: 1629574695 - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - example: 1629574695515050031 - - OCIDescriptor: - type: "object" - x-go-name: Descriptor - description: | - A descriptor struct containing digest, media type, and size, as defined in - the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). - properties: - mediaType: - description: | - The media type of the object this schema refers to. - type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" - digest: - description: | - The digest of the targeted content. - type: "string" - example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - size: - description: | - The size in bytes of the blob. - type: "integer" - format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" - - OCIPlatform: - type: "object" - x-go-name: Platform - description: | - Describes the platform which the image in the manifest runs on, as defined - in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). - properties: - architecture: - description: | - The CPU architecture, for example `amd64` or `ppc64`. - type: "string" - example: "arm" - os: - description: | - The operating system, for example `linux` or `windows`. 
- type: "string" - example: "windows" - os.version: - description: | - Optional field specifying the operating system version, for example on - Windows `10.0.19041.1165`. - type: "string" - example: "10.0.19041.1165" - os.features: - description: | - Optional field specifying an array of strings, each listing a required - OS feature (for example on Windows `win32k`). - type: "array" - items: - type: "string" - example: - - "win32k" - variant: - description: | - Optional field specifying a variant of the CPU, for example `v7` to - specify ARMv7 when architecture is `arm`. - type: "string" - example: "v7" - - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" - - ClusterVolume: - type: "object" - description: | - Options and information specific to, and only present on, Swarm CSI - cluster volumes. - properties: - ID: - type: "string" - description: | - The Swarm ID of this volume. Because cluster volumes are Swarm - objects, they have an ID, unlike non-cluster volumes. This ID can - be used to refer to the Volume instead of the name. - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - Info: - type: "object" - description: | - Information about the global status of the volume. - properties: - CapacityBytes: - type: "integer" - format: "int64" - description: | - The capacity of the volume in bytes. A value of 0 indicates that - the capacity is unknown. - VolumeContext: - type: "object" - description: | - A map of strings to strings returned from the storage plugin when - the volume is created. - additionalProperties: - type: "string" - VolumeID: - type: "string" - description: | - The ID of the volume as returned by the CSI storage plugin. This - is distinct from the volume's ID as provided by Docker. This ID - is never used by the user when communicating with Docker to refer - to this volume. If the ID is blank, then the Volume has not been - successfully created in the plugin yet. - AccessibleTopology: - type: "array" - description: | - The topology this volume is actually accessible from. - items: - $ref: "#/definitions/Topology" - PublishStatus: - type: "array" - description: | - The status of the volume as it pertains to its publishing and use on - specific nodes - items: - type: "object" - properties: - NodeID: - type: "string" - description: | - The ID of the Swarm node the volume is published on. - State: - type: "string" - description: | - The published state of the volume. - * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. - * `published` The volume is published successfully to the node. - * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. - * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. 
- enum: - - "pending-publish" - - "published" - - "pending-node-unpublish" - - "pending-controller-unpublish" - PublishContext: - type: "object" - description: | - A map of strings to strings returned by the CSI controller - plugin when a volume is published. - additionalProperties: - type: "string" - - ClusterVolumeSpec: - type: "object" - description: | - Cluster-specific options used to create the volume. - properties: - Group: - type: "string" - description: | - Group defines the volume group of this volume. Volumes belonging to - the same group can be referred to by group name when creating - Services. Referring to a volume by group instructs Swarm to treat - volumes in that group interchangeably for the purpose of scheduling. - Volumes with an empty string for a group technically all belong to - the same, emptystring group. - AccessMode: - type: "object" - description: | - Defines how the volume is used by tasks. - properties: - Scope: - type: "string" - description: | - The set of nodes this volume can be used on at one time. - - `single` The volume may only be scheduled to one node at a time. - - `multi` the volume may be scheduled to any supported number of nodes at a time. - default: "single" - enum: ["single", "multi"] - x-nullable: false - Sharing: - type: "string" - description: | - The number and way that different tasks can use this volume - at one time. - - `none` The volume may only be used by one task at a time. - - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - - `all` The volume may have any number of readers and writers. - default: "none" - enum: ["none", "readonly", "onewriter", "all"] - x-nullable: false - MountVolume: - type: "object" - description: | - Options for using this volume as a Mount-type volume. - - Either MountVolume or BlockVolume, but not both, must be - present. - properties: - FsType: - type: "string" - description: | - Specifies the filesystem type for the mount volume. - Optional. - MountFlags: - type: "array" - description: | - Flags to pass when mounting the volume. Optional. - items: - type: "string" - BlockVolume: - type: "object" - description: | - Options for using this volume as a Block-type volume. - Intentionally empty. - Secrets: - type: "array" - description: | - Swarm Secrets that are passed to the CSI storage plugin when - operating on this volume. - items: - type: "object" - description: | - One cluster volume secret entry. Defines a key-value pair that - is passed to the plugin. - properties: - Key: - type: "string" - description: | - Key is the name of the key of the key-value pair passed to - the plugin. - Secret: - type: "string" - description: | - Secret is the swarm Secret object from which to read data. - This can be a Secret name or ID. The Secret data is - retrieved by swarm and used as the value of the key-value - pair passed to the plugin. - AccessibilityRequirements: - type: "object" - description: | - Requirements for the accessible topology of the volume. These - fields are optional. For an in-depth description of what these - fields mean, see the CSI specification. - properties: - Requisite: - type: "array" - description: | - A list of required topologies, at least one of which the - volume must be accessible from. 
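To make the `ClusterVolumeSpec` options above concrete, here is a rough sketch of the JSON body a client might build for a CSI cluster volume. The struct field names are copied from the schema above; everything else (the group name, filesystem type, and the fact that no HTTP call is shown) is illustrative only, not part of this patch.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hand-written structs mirroring the ClusterVolumeSpec fields defined above.
type mountVolume struct {
	FsType     string   `json:"FsType,omitempty"`
	MountFlags []string `json:"MountFlags,omitempty"`
}

type accessMode struct {
	Scope       string       `json:"Scope"`   // "single" or "multi"
	Sharing     string       `json:"Sharing"` // "none", "readonly", "onewriter", "all"
	MountVolume *mountVolume `json:"MountVolume,omitempty"`
}

type clusterVolumeSpec struct {
	Group        string     `json:"Group,omitempty"`
	AccessMode   accessMode `json:"AccessMode"`
	Availability string     `json:"Availability,omitempty"` // "active", "pause", "drain"
}

func main() {
	// Illustrative values only; the group and filesystem type are made up.
	spec := clusterVolumeSpec{
		Group: "db-volumes",
		AccessMode: accessMode{
			Scope:       "single",
			Sharing:     "none",
			MountVolume: &mountVolume{FsType: "ext4"},
		},
		Availability: "active",
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```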
- items: - $ref: "#/definitions/Topology" - Preferred: - type: "array" - description: | - A list of topologies that the volume should attempt to be - provisioned in. - items: - $ref: "#/definitions/Topology" - CapacityRange: - type: "object" - description: | - The desired capacity that the volume should be created with. If - empty, the plugin will decide the capacity. - properties: - RequiredBytes: - type: "integer" - format: "int64" - description: | - The volume must be at least this big. The value of 0 - indicates an unspecified minimum - LimitBytes: - type: "integer" - format: "int64" - description: | - The volume must not be bigger than this. The value of 0 - indicates an unspecified maximum. - Availability: - type: "string" - description: | - The availability of the volume for use in tasks. - - `active` The volume is fully available for scheduling on the cluster - - `pause` No new workloads should use the volume, but existing workloads are not stopped. - - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. - default: "active" - x-nullable: false - enum: - - "active" - - "pause" - - "drain" - - Topology: - description: | - A map of topological domains to topological segments. For in depth - details, see documentation for the Topology object in the CSI - specification. - type: "object" - additionalProperties: - type: "string" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see the - [inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container - than inspecting a single container. For example, the list of linked - containers is not propagated . - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: | - Return all containers. By default, only running containers are shown. - type: "boolean" - default: false - - name: "limit" - in: "query" - description: | - Return this number of most recently created containers, including - non-running ones. - type: "integer" - - name: "size" - in: "query" - description: | - Return the size of container as fields `SizeRw` and `SizeRootFs`. - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a - `map[string][]string`). For example, `{"status": ["paused"]}` will - only return paused containers. 
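Since the `filters` parameter is a JSON-encoded `map[string][]string` carried in the query string, a short sketch may help; the full list of supported filter keys continues just below. The `localhost:2375` TCP endpoint and the chosen filter values are assumptions for illustration only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// filters is a map[string][]string encoded as JSON, then URL-encoded.
	filters := map[string][]string{
		"status": {"paused"},
		"label":  {"com.example.vendor=Acme"},
	}
	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("all", "true")
	q.Set("filters", string(raw)) // url.Values takes care of percent-encoding

	// Assumption: daemon exposed on TCP; adjust for a Unix socket as needed.
	resp, err := http.Get("http://localhost:2375/containers/json?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // JSON array of ContainerSummary objects
}
```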
- - Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - 
NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: | - Assign the specified name to the container. Must match - `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. - type: "string" - pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - - name: "platform" - in: "query" - description: | - Platform in the format `os[/arch[/variant]]` used for image lookup. - - When specified, the daemon checks if the requested image is present - in the local image cache with the given OS and Architecture, and - otherwise returns a `404` status. - - If the option is not set, the host's native OS and Architecture are - used to look up the image in the image cache. However, if no platform - is passed and the given image does exist in the local image cache, - but its OS or architecture does not match, the container is created - with the available image, and a warning is added to the `Warnings` - field in the response, for example; - - WARNING: The requested image's platform (linux/arm64/v8) does not - match the detected host platform (linux/amd64) and no - specific platform was requested - - type: "string" - default: "" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - $ref: "#/definitions/NetworkingConfig" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - NanoCpus: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: 0 - PortBindings: - 22/tcp: - - 
HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - required: true - responses: - 201: - description: "Container created successfully" - schema: - $ref: "#/definitions/ContainerCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." - operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
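The `/containers/create` request body documented above is `ContainerConfig` plus inline `HostConfig` and `NetworkingConfig` objects. A minimal sketch of such a request, much smaller than the full example above, might look like the following; the TCP endpoint, container name, and image are assumptions for illustration.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Minimal create request: a few ContainerConfig fields plus an inline
	// HostConfig, mirroring the allOf schema above. Field names are from the spec.
	body := map[string]interface{}{
		"Image": "ubuntu",
		"Cmd":   []string{"date"},
		"HostConfig": map[string]interface{}{
			"NetworkMode": "bridge",
		},
	}
	buf, err := json.Marshal(body)
	if err != nil {
		panic(err)
	}

	// Assumption: daemon exposed on TCP; adjust for a Unix socket as needed.
	resp, err := http.Post(
		"http://localhost:2375/containers/create?name=example",
		"application/json",
		bytes.NewReader(buf),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status) // 201 on success
	fmt.Println(string(out)) // ContainerCreateResponse: {"Id": "...", "Warnings": [...]}
}
```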
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "overlay2" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" 
- RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: | - On Unix systems, this is done by running the `ps` command. This endpoint - is not supported on Windows. - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or - `journald` logging driver. - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ContainerLogs" - responses: - 200: - description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not - upgrade the connection and does not set Content-Type. 
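As a companion to the note above about the logs stream format, here is a minimal sketch that fetches logs over plain HTTP; the query parameters it uses (`stdout`, `stderr`, `tail`, `timestamps`) are the ones listed below. The container ID and the `localhost:2375` TCP endpoint are placeholders.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Assumption: daemon exposed on TCP; the container ID is a placeholder.
	const id = "ba033ac44011"
	url := fmt.Sprintf(
		"http://localhost:2375/containers/%s/logs?stdout=true&stderr=true&tail=100&timestamps=true", id)

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// For a container created without a TTY this body is the multiplexed
	// stream documented under the attach endpoint; here it is copied through raw.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		panic(err)
	}
}
```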
- schema: - type: "string" - format: "binary" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - $ref: "#/definitions/FilesystemChange" - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." - operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of the *previous* read, and is - used to calculate the CPU usage percentage. It is not an exact copy - of the `cpu_stats` field. 
- - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. - - On a cgroup v2 host, the following fields are not set - * `blkio_stats`: all fields other than `io_service_bytes_recursive` - * `cpu_stats`: `cpu_usage.percpu_usage` - * `memory_stats`: `max_usage` and `failcnt` - Also, `memory_stats.stats` fields are incompatible with cgroup v1. - - To calculate the values shown by the `stats` command of the docker cli tool - the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` - * available_memory = `memory_stats.limit` - * Memory usage % = `(used_memory / available_memory) * 100.0` - * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` - * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` - * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` - operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: | - Stream the output. If false, the stats will be output once and then - it will disconnect. 
- type: "boolean" - default: true - - name: "one-shot" - in: "query" - description: | - Only get a single stat instead of waiting for 2 cycles. Must be used - with `stream=false`. - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container." - operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container. Format is a - single character `[a-Z]` or `ctrl-` where `` is one - of: `a-z`, `@`, `^`, `[`, `,` or `_`. - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). 
- type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: | - Send a POSIX signal to a container, defaulting to killing to the - container. - operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: | - Change various configuration options of a container without having to - recreate it. - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the freezer cgroup to suspend all processes in a container. 
- - Traditionally, when suspending a process the `SIGSTOP` signal is used, - which is observable by the process being suspended. With the freezer - cgroup the process is unaware, and unable to capture, that it is being - suspended, and subsequently resumed. - operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach - to the same container multiple times and you can reattach to containers - that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint - to do anything. - - See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) - for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, - and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used - for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client - can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will - similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream - and the stream over the hijacked connected is multiplexed to separate out - `stdout` and `stderr`. The stream consists of a series of frames, each - containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or - `stderr`). It also contains the size of the associated frame encoded in - the last four bytes (`uint32`). 
- - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size - encoded as big endian. - - Following the header is the payload, which is the specified number of - bytes of `STREAM_TYPE`. - - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream is not multiplexed. The data exchanged over the hijacked - connection is simply the raw data from the process PTY and client's - `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container. Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,` or `_`. - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you - want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been - returned, it will seamlessly transition into streaming current - output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: | - Stream attached streams from the time the request was made onwards. 
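The five-step loop described above is small enough to write out in full. The following Go sketch (standard library only, not part of this patch) demultiplexes a non-TTY stream from any `io.Reader`, whether it comes from the hijacked attach connection or from the logs endpoint.

```go
package main

import (
	"encoding/binary"
	"errors"
	"io"
	"os"
)

// demux reads the 8-byte-header framing described above and copies each
// frame's payload to stdout or stderr according to STREAM_TYPE.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		// 1. Read 8 bytes.
		if _, err := io.ReadFull(r, header); err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
		// 2. Choose stdout or stderr depending on the first byte.
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin echoed on stdout, and stdout itself
			dst = stdout
		case 2:
			dst = stderr
		default:
			return errors.New("unknown stream type")
		}
		// 3. Extract the frame size from the last four bytes (big-endian uint32).
		size := binary.BigEndian.Uint32(header[4:8])
		// 4. Read that many payload bytes and write them to the chosen output. 5. Repeat.
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// Example use: pipe a saved multiplexed stream through the demultiplexer.
	if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}
```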
- type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,`, or `_`. - type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - $ref: "#/definitions/ContainerWaitResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: | - Wait until a container state reaches the given condition. - - Defaults to `not-running` if omitted or empty. - type: "string" - enum: - - "not-running" - - "next-exit" - - "removed" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: | - You cannot remove a running container: c2ada9df5af8. 
Stop the - container before attempting removal or force remove - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove anonymous volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: | - A response header `X-Docker-Container-Path-Stat` is returned, containing - a base64 - encoded JSON object with some filesystem header information - about the path. - operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: | - A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: | - Upload a tar archive to be extracted to a path in the filesystem of container id. - `path` parameter is asserted to be a directory. If it exists as a file, 400 error - will be returned with message "not a directory". - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "not a directory" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." 
- schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. " - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: | - If `1`, `true`, or `True` then it will be an error if unpacking the - given content would cause an existing directory to be replaced with - a non-directory and vice versa. - type: "string" - - name: "copyUIDGID" - in: "query" - description: | - If `1`, `true`, then it will copy UID/GID maps to the dest file or - dir - type: "string" - - name: "inputStream" - in: "body" - required: true - description: | - The input stream must be a tar archive compressed with one of the - following algorithms: `identity` (no compression), `gzip`, `bzip2`, - or `xz`. - schema: - type: "string" - format: "binary" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the images list. 
- - Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - type: "string" - - name: "shared-size" - in: "query" - description: "Compute and show shared size as a `SharedSize` field on each image." - type: "boolean" - default: false - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). - - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." 
- type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." - type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the - query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: | - Sets the networking mode for the run commands during build. Supported - standard values are: `bridge`, `host`, `none`, and `container:`. - Any other value is taken as a custom network's name or ID to which this - container should connect to. - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
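Constructing the `X-Registry-Config` header and the URI-encoded `buildargs` parameter described above is mostly JSON and base64 plumbing. The sketch below uses made-up credentials and an example registry purely for illustration; it encodes with the URL-safe base64 alphabet to match the `X-Registry-Auth` convention elsewhere in this API, which is an assumption on top of the plain "base64-encoded" wording above.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// X-Registry-Config: registry URL -> auth configuration, as JSON, then base64.
	registryConfig := map[string]map[string]string{
		"docker.example.com": {
			"username": "janedoe", // made-up credentials, for illustration only
			"password": "hunter2",
		},
	}
	rawCfg, err := json.Marshal(registryConfig)
	if err != nil {
		panic(err)
	}
	header := base64.URLEncoding.EncodeToString(rawCfg)

	// buildargs: JSON map of build-time variables, URI-component encoded by url.Values.
	buildArgs, _ := json.Marshal(map[string]string{"FOO": "bar"})
	q := url.Values{}
	q.Set("t", "example/app:latest")
	q.Set("buildargs", string(buildArgs))

	fmt.Println("X-Registry-Config:", header)
	fmt.Println("POST /build?" + q.Encode())
}
```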
- type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - - name: "outputs" - in: "query" - description: "BuildKit output configuration" - type: "string" - default: "" - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - parameters: - - name: "keep-storage" - in: "query" - description: "Amount of disk space in bytes to keep for cache" - type: "integer" - format: "int64" - - name: "all" - in: "query" - type: "boolean" - description: "Remove all types of build cache" - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the list of build cache objects. - - Available filters: - - - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. - - `id=` - - `parent=` - - `type=` - - `description=` - - `inuse` - - `shared` - - `private` - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - CachesDeleted: - type: "array" - items: - description: "ID of build cache object" - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Pull or import an image." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "message" - in: "query" - description: "Set commit message for imported image." 
- type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "changes" - in: "query" - description: | - Apply `Dockerfile` instructions to the image that is created, - for example: `changes=ENV DEBUG=true`. - Note that `ENV DEBUG=true` should be URI component encoded. - - Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - type: "array" - items: - type: "string" - - name: "platform" - in: "query" - description: | - Platform in the format os[/arch[/variant]]. - - When used in combination with the `fromImage` option, the daemon checks - if the given image is present in the local image cache with the given - OS and Architecture, and otherwise attempts to pull the image. If the - option is not set, the host's native OS and Architecture are used. - If the given image does not exist in the local image cache, the daemon - attempts to pull the image with the host's native OS and Architecture. - If the given image does exists in the local image cache, but its OS or - architecture does not match, a warning is produced. - - When used with the `fromSrc` option to import an image from an archive, - this option sets the platform information for the imported image. If - the option is not set, the host's native OS and Architecture are used - for the imported image. - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/ImageInspect" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must - already have a tag which references the registry. For example, - `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." 
- operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - description: | - Whether this repository has automated builds enabled. - -


- - > **Deprecated**: This field is deprecated and will always - > be "false" in future. - type: "boolean" - example: false - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" - is_official: true - is_automated: false - name: "alpine" - star_count: 10093 - - description: "Busybox base image." - is_official: true - is_automated: false - name: "Busybox base image." - star_count: 3037 - - description: "The PostgreSQL object-relational database system provides reliability and data integrity." - is_official: true - is_automated: false - name: "postgres" - star_count: 12408 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-automated=(true|false)` (deprecated, see below) - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. - - The `is-automated` filter is deprecated. The `is_automated` field has - been deprecated by Docker Hub's search API. Consequently, searching - for `is-automated=true` will yield no results. - type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: | - Validate credentials for a registry and, if available, get an identity - token for accessing the registry without password. - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." 
- schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." - 204: - description: "No error" - 401: - description: "Auth error" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." - operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/SystemVersion" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: | - Default version of docker image builder - - The default on Linux is version "2" (BuildKit), but the daemon - can be configured to recommend version "1" (classic Builder). - Windows does not yet support BuildKit for native Windows images, - and uses "1" (classic builder) as a default. - - This value is a recommendation as advertised by the daemon, and - it is up to the client to choose which builder to use. - default: "2" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. - default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - headers: - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - tags: ["System"] - head: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." 
- operationId: "SystemPingHead" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "(empty)" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. - default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. 
- - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` - - Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - The Builder reports `prune` events - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/EventMessage" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." - type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - - - `config=` config name or ID - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `node=` node ID - - `plugin`= plugin name or ID - - `scope`= local or swarm - - `secret=` secret name or ID - - `service=` service name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - BuildCache: - type: "array" - items: - $ref: "#/definitions/BuildCache" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: 
"Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - BuildCache: - - - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parents: [] - Type: "regular" - Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" - InUse: false - Shared: true - Size: 0 - CreatedAt: "2021-06-28T13:31:01.474619385Z" - LastUsedAt: "2021-07-07T22:02:32.738075951Z" - UsageCount: 26 - - - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] - Type: "regular" - Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: false - Shared: true - Size: 51 - CreatedAt: "2021-06-28T13:31:03.002625487Z" - LastUsedAt: "2021-07-07T22:02:32.773909517Z" - UsageCount: 26 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "type" - in: "query" - description: | - Object types, for which to compute and return data. - type: "array" - collectionFormat: multi - items: - type: "string" - enum: ["container", "image", "volume", "build-cache"] - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
- - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image - repositories. - - For each value of the `names` parameter: if it is a specific name and - tag (e.g. `ubuntu:latest`), then only that image (and its parents) are - returned; if it is an image ID, similarly only that image (and its parents) - are returned and there would be no names referenced in the 'repositories' - file for this image ID. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." - type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - title: "ExecConfig" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - DetachKeys: - type: "string" - description: | - Override the key sequence for detaching a container. 
Format is - a single character `[a-Z]` or `ctrl-` where `` - is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: | - A list of environment variables in the form `["VAR=value", ...]`. - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: | - The user, and optionally, group to run the exec process inside - the container. Format is one of: `user`, `user:group`, `uid`, - or `uid:gid`. - WorkingDir: - type: "string" - description: | - The working directory for the exec process inside the container. - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: | - Starts a previously set up exec instance. If detach is true, this endpoint - returns immediately after starting the command. Otherwise, it sets up an - interactive session with the command. - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - title: "ExecStartConfig" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: | - Resize the TTY session used by an exec instance. This endpoint only works - if `tty` was specified as part of creating and starting the exec instance. - operationId: "ExecResize" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." 
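The create-then-start exec flow described above, sketched with the Go client. The container name and command are placeholders; `Tty: true` keeps the attached stream unmultiplexed so it can be copied directly:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// POST /containers/{id}/exec
	created, err := cli.ContainerExecCreate(ctx, "my-container", types.ExecConfig{
		Cmd:          []string{"date"},
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
	})
	if err != nil {
		panic(err)
	}

	// POST /exec/{id}/start, attached to the raw output stream.
	hij, err := cli.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{Tty: true})
	if err != nil {
		panic(err)
	}
	defer hij.Close()
	io.Copy(os.Stdout, hij.Reader)
}
```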
- operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." - examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - $ref: "#/definitions/VolumeListResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. - - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - $ref: "#/definitions/VolumeCreateOptions" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - put: - summary: | - "Update a volume. 
Valid only for Swarm cluster volumes" - operationId: "VolumeUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name or ID of the volume" - type: "string" - required: true - - name: "body" - in: "body" - schema: - # though the schema for is an object that contains only a - # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object - # means that if, later on, we support things like changing the - # labels, we can do so without duplicating that information to the - # ClusterVolumeSpec. - type: "object" - description: "Volume configuration" - properties: - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - description: | - The spec of the volume to update. Currently, only Availability may - change. All other fields must remain unchanged. - - name: "version" - in: "query" - description: | - The version number of the volume being updated. This is required to - avoid conflicting writes. Found in the volume's `ClusterVolume` - field. - type: "integer" - format: "int64" - required: true - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." - operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. For details on the format, see the - [network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than - inspecting a single network. 
For example, the list of containers attached - to the network is not propagated in API versions 1.28 and up. - operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process - on the networks list. - - Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - networks that are not in use by a container. When set to `false` - (or `0`), only networks that are in use by one or more - containers are returned. - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
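A sketch of the list-with-filters call via the Go client; the `driver` filter value is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// GET /networks?filters={"driver":["bridge"]}
	nets, err := cli.NetworkList(ctx, types.NetworkListOptions{
		Filters: filters.NewArgs(filters.Arg("driver", "bridge")),
	})
	if err != nil {
		panic(err)
	}
	for _, n := range nets {
		fmt.Printf("%.12s  %s  scope=%s\n", n.ID, n.Name, n.Scope)
	}
}
```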
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "No error" - schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: | - Forbidden operation. This happens when trying to create a network named after a pre-defined network, - or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - title: "NetworkCreateRequest" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - CheckDuplicate: - description: | - Check for networks with duplicate names. Since Network is - primarily keyed based on a random ID and not on the name, and - network name is strictly a user-friendly alias to the network - which is uniquely identified using ID, there is no guaranteed - way to check for duplicates. CheckDuplicate is there to provide - a best effort checking of any networks which has the same name - but it is not guaranteed to catch all name collisions. - type: "boolean" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: | - Globally scoped network is manually attachable by regular - containers from workers in swarm mode. 
- type: "boolean" - Ingress: - description: | - Ingress network is the network which provides the routing-mesh - in swarm mode. - type: "boolean" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - Options: - description: "Network specific options to be used by the drivers." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkConnectRequest" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkDisconnectRequest" - properties: - Container: - type: "string" - description: | - The ID or name of the container to disconnect from the network. - Force: - type: "boolean" - description: | - Force the container to disconnect from the network. 
- tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the plugin list. - - Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be - enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. 
- type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Disable the plugin before removing. This may result in issues if the - plugin is in use by a container. - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Force disable a plugin even if still in use. - required: false - type: "boolean" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. 
- - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `node.label=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: | - The version number of the node object being updated. This is required - to avoid conflicting writes. 
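
The `version` query parameter on `/nodes/{id}/update` implements optimistic concurrency: read the node, modify its spec locally, and send the update together with the version you read. A minimal read-modify-write sketch using the Go client; the node ID is a placeholder.

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()
	const nodeID = "24ifsmvkjbyhk" // placeholder node ID

	// GET /nodes/{id}: the returned object carries the current version index.
	node, _, err := cli.NodeInspectWithRaw(ctx, nodeID)
	if err != nil {
		panic(err)
	}

	// Modify the spec locally, e.g. drain the node.
	spec := node.Spec
	spec.Availability = swarm.NodeAvailabilityDrain

	// POST /nodes/{id}/update?version=...: sending the version read above lets
	// the server reject conflicting concurrent writes.
	if err := cli.NodeUpdate(ctx, nodeID, node.Version, spec); err != nil {
		panic(err)
	}
}
```
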
- type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmInitRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication, as well - as determining the networking interface used for the VXLAN - Tunnel Endpoint (VTEP). This can either be an address/port - combination in the form `192.168.1.1:4567`, or an interface - followed by a port number, like `eth0:4567`. If the port number - is omitted, the default swarm listening port is used. - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - type: "string" - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - if no port is set or is set to 0, default port 4789 will be used. - type: "integer" - format: "uint32" - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global - scope networks. - type: "array" - items: - type: "string" - example: ["10.10.0.0/16", "20.20.0.0/16"] - ForceNewCluster: - description: "Force creation of a new swarm." - type: "boolean" - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created - from the default subnet pool. 
- type: "integer" - format: "uint32" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathPort: 4789 - DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] - SubnetSize: 24 - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmJoinRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication if the node - gets promoted to manager, as well as determining the networking - interface used for the VXLAN Tunnel Endpoint (VTEP). - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: | - Addresses of manager nodes already participating in the swarm. - type: "array" - items: - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: | - Force leave swarm, even if this is the last manager or that it will - break the cluster. 
- in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: | - The version number of the swarm object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmUnlockRequest" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the services list. - - Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - - name: "status" - in: "query" - type: "boolean" - description: | - Include service status, with count of running and desired tasks. 
- tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." 
- type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: | - The version number of the service object being updated. This is - required to avoid conflicting writes. - This version number should be the value as currently set on the - service *before* the update. You can find the current version by - calling `GET /services/{id}` - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - description: | - If the `X-Registry-Auth` header is not specified, this parameter - indicates where to find registry authorization credentials. - type: "string" - enum: ["spec", "previous-spec"] - default: "spec" - - name: "rollback" - in: "query" - description: | - Set to this parameter to `previous` to cause a server-side rollback - to the previous service spec. The supplied spec will be ignored in - this case. - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. See also - [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. 
- produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ServiceLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: 
"60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the tasks list. - - Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Task"] - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the secrets list. 
- - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: | - The spec of the secret to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [SecretInspect endpoint](#operation/SecretInspect) response values. 
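
On the wire a secret's `Data` is base64-encoded, as in the `SecretSpec` example above; the Go client takes raw bytes and `encoding/json` performs the base64 encoding of `[]byte` fields automatically. A minimal create/inspect/remove sketch; the name, label, and payload are placeholders (the payload matches what the example's base64 string decodes to).

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// POST /secrets/create
	resp, err := cli.SecretCreate(ctx, swarm.SecretSpec{
		Annotations: swarm.Annotations{
			Name:   "app-key.crt", // placeholder name
			Labels: map[string]string{"foo": "bar"},
		},
		Data: []byte("THIS IS NOT A REAL CERTIFICATE\n"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("secret ID:", resp.ID)

	// GET /secrets/{id} returns the spec without the data; DELETE /secrets/{id}
	// removes the secret again.
	if _, _, err := cli.SecretInspectWithRaw(ctx, resp.ID); err != nil {
		panic(err)
	}
	if err := cli.SecretRemove(ctx, resp.ID); err != nil {
		panic(err)
	}
}
```
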
- - name: "version" - in: "query" - description: | - The version number of the secret object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the configs list. - - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - 
$ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: | - The spec of the config to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [ConfigInspect endpoint](#operation/ConfigInspect) response values. - - name: "version" - in: "query" - description: | - The version number of the config object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: | - Return image digest and platform information by contacting the registry. - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - $ref: "#/definitions/DistributionInspect" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to - call back to the client for advanced capabilities. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows - the client to expose gPRC services on that connection. 
- - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon responds with a `101 UPGRADED` response follow with - the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90e..0000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go deleted file mode 100644 index 94a9c0a47d..0000000000 --- a/vendor/github.com/docker/docker/api/types/checkpoint/list.go +++ /dev/null @@ -1,7 +0,0 @@ -package checkpoint - -// Summary represents the details of a checkpoint when listing endpoints. -type Summary struct { - // Name is the name of the checkpoint. - Name string -} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go deleted file mode 100644 index 9477458c24..0000000000 --- a/vendor/github.com/docker/docker/api/types/checkpoint/options.go +++ /dev/null @@ -1,19 +0,0 @@ -package checkpoint - -// CreateOptions holds parameters to create a checkpoint from a container. -type CreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// ListOptions holds parameters to list checkpoints for a container. -type ListOptions struct { - CheckpointDir string -} - -// DeleteOptions holds parameters to delete a checkpoint from a container. -type DeleteOptions struct { - CheckpointID string - CheckpointDir string -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 80691034e8..0000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,426 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - units "github.com/docker/go-units" -) - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. 
-type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// NewHijackedResponse intializes a HijackedResponse type -func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { - return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - mediaType string - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. -// returns false if HTTP Content-Type is not relevant, and container must be inspected -func (h *HijackedResponse) MediaType() (string, bool) { - if h.mediaType == "" { - return "", false - } - return h.mediaType, true -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. 
- BuildArgs map[string]*string - AuthConfigs map[string]registry.AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the unerlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to list images with. -type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. 
-type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. 
A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 7d5930bbeb..0000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,67 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *ocispec.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. 
- ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go deleted file mode 100644 index fe8d6d3696..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_type.go +++ /dev/null @@ -1,15 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ChangeType Kind of change -// -// Can be one of: -// -// - `0`: Modified ("C") -// - `1`: Added ("A") -// - `2`: Deleted ("D") -// -// swagger:model ChangeType -type ChangeType uint8 diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go deleted file mode 100644 index 3a3a83866e..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_types.go +++ /dev/null @@ -1,23 +0,0 @@ -package container - -const ( - // ChangeModify represents the modify operation. - ChangeModify ChangeType = 0 - // ChangeAdd represents the add operation. - ChangeAdd ChangeType = 1 - // ChangeDelete represents the delete operation. - ChangeDelete ChangeType = 2 -) - -func (ct ChangeType) String() string { - switch ct { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - default: - return "" - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index d8cfe443f8..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,79 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "io" - "time" - - "github.com/docker/docker/api/types/strslice" - dockerspec "github.com/docker/docker/image/spec/specs-go/v1" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// StopOptions holds the options to stop or restart a container. 
-type StopOptions struct { - // Signal (optional) is the signal to send to the container to (gracefully) - // stop it before forcibly terminating the container with SIGKILL after the - // timeout expires. If not value is set, the default (SIGTERM) is used. - Signal string `json:",omitempty"` - - // Timeout (optional) is the timeout (in seconds) to wait for the container - // to stop gracefully before forcibly terminating it with SIGKILL. - // - // - Use nil to use the default timeout (10 seconds). - // - Use '-1' to wait indefinitely. - // - Use '0' to not wait for the container to exit gracefully, and - // immediately proceeds to forcibly terminating the container. - // - Other positive values are used as timeout (in seconds). - Timeout *int `json:",omitempty"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig = dockerspec.HealthcheckConfig - -// ExecStartOptions holds the options to start container's exec. -type ExecStartOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - ConsoleSize *[2]uint `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da367..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea8..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go deleted file mode 100644 index aa0e7f7d07..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/create_response.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. 
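For the StopOptions and Config types deleted above, a hedged sketch of the documented Timeout semantics (nil for the default, -1 to wait forever, 0 to kill immediately); the image name and command below are made-up illustration values.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Timeout is a *int so "unset" (nil, meaning the 10s default) can be told
	// apart from 0 (kill immediately) and -1 (wait forever).
	noWait := 0
	stop := container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &noWait,
	}
	fmt.Printf("signal=%s timeout=%d\n", stop.Signal, *stop.Timeout)

	// A minimal, portable container configuration.
	cfg := container.Config{
		Image: "busybox:latest",
		Cmd:   []string{"sh", "-c", "echo hello"},
		Env:   []string{"FOO=bar"},
	}
	fmt.Println(cfg.Image, cfg.Cmd, cfg.Env)
}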
-// Editing this file might prove futile when you re-run the swagger generate command - -// CreateResponse ContainerCreateResponse -// -// OK response to ContainerCreate operation -// swagger:model CreateResponse -type CreateResponse struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go deleted file mode 100644 index 32c978037e..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package container - -type errInvalidParameter struct{ error } - -func (e *errInvalidParameter) InvalidParameter() {} - -func (e *errInvalidParameter) Unwrap() error { - return e.error -} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go deleted file mode 100644 index 9e9c2ad1d5..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// FilesystemChange Change in the container's filesystem. -// -// swagger:model FilesystemChange -type FilesystemChange struct { - - // kind - // Required: true - Kind ChangeType `json:"Kind"` - - // Path to file or directory that has changed. - // - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go deleted file mode 100644 index c49b64716a..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ /dev/null @@ -1,493 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// cgroup namespace modes for containers -const ( - CgroupnsModeEmpty CgroupnsMode = "" - CgroupnsModePrivate CgroupnsMode = "private" - CgroupnsModeHost CgroupnsMode = "host" -) - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == CgroupnsModePrivate -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == CgroupnsModeHost -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == CgroupnsModeEmpty -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. 
The supported -// values are platform specific -type Isolation string - -// Isolation modes for containers -const ( - IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) - IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon - IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode - IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode -) - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - // TODO consider making isolation-mode strict (case-sensitive) - v := Isolation(strings.ToLower(string(i))) - return v == IsolationDefault || v == IsolationEmpty -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationHyperV -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationProcess -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IpcMode constants -const ( - IPCModeNone IpcMode = "none" - IPCModeHost IpcMode = "host" - IPCModeContainer IpcMode = "container" - IPCModePrivate IpcMode = "private" - IPCModeShareable IpcMode = "shareable" -) - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == IPCModePrivate -} - -// IsHost indicates whether the container shares the host's ipc namespace. -func (n IpcMode) IsHost() bool { - return n == IPCModeHost -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == IPCModeShareable -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == IPCModeNone -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. 
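A small illustrative sketch of the mode helpers above (again assuming the container package import path still resolves): Isolation comparisons are case-insensitive, while CgroupnsMode only validates the three documented values.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Isolation comparisons lower-case the value first, so "", "default" and
	// "HyperV" all resolve as documented above.
	for _, iso := range []container.Isolation{"", "default", "HyperV", "process"} {
		fmt.Printf("%-9q default=%-5v hyperv=%-5v process=%v\n",
			iso, iso.IsDefault(), iso.IsHyperV(), iso.IsProcess())
	}

	// CgroupnsMode accepts only "", "private" or "host".
	fmt.Println(container.CgroupnsMode("private").Valid()) // true
	fmt.Println(container.CgroupnsMode("shared").Valid())  // false
}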
-func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !n.IsHost() -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - return n == "" || n.IsHost() -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - _, ok := containerID(string(c)) - return ok -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return c == "" || c.IsContainer() -} - -// Container returns the ID or name of the container whose cgroup will be used. -func (c CgroupSpec) Container() (idOrName string) { - idOrName, _ = containerID(string(c)) - return idOrName -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !n.IsHost() -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - return n == "" || n.IsHost() -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - return n == "" || n.IsHost() || validContainer(string(n)) -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. 
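The namespace modes above share the "container:<id|name>" convention; a hedged sketch of how callers typically consume it (the container name is invented for the example).

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Joining another container's IPC namespace.
	ipc := container.IpcMode("container:redis")
	fmt.Println(ipc.IsContainer(), ipc.Container()) // true redis

	// Sharing the host PID namespace.
	pid := container.PidMode("host")
	fmt.Println(pid.IsHost(), pid.Valid()) // true true
}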
-type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name RestartPolicyMode - MaximumRetryCount int -} - -type RestartPolicyMode string - -const ( - RestartPolicyDisabled RestartPolicyMode = "no" - RestartPolicyAlways RestartPolicyMode = "always" - RestartPolicyOnFailure RestartPolicyMode = "on-failure" - RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped" -) - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == RestartPolicyDisabled || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == RestartPolicyAlways -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == RestartPolicyOnFailure -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == RestartPolicyUnlessStopped -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// ValidateRestartPolicy validates the given RestartPolicy. -func ValidateRestartPolicy(policy RestartPolicy) error { - switch policy.Name { - case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: - if policy.MaximumRetryCount != 0 { - msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" - if policy.MaximumRetryCount < 0 { - msg += " and cannot be negative" - } - return &errInvalidParameter{fmt.Errorf(msg)} - } - return nil - case RestartPolicyOnFailure: - if policy.MaximumRetryCount < 0 { - return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} - } - return nil - case "": - // Versions before v25.0.0 created an empty restart-policy "name" as - // default. Allow an empty name with "any" MaximumRetryCount for - // backward-compatibility. - return nil - default: - return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} - } -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. 
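The ValidateRestartPolicy rules above are easiest to see with a concrete call; a short sketch with illustrative values.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	ok := container.RestartPolicy{
		Name:              container.RestartPolicyOnFailure,
		MaximumRetryCount: 3,
	}
	fmt.Println(container.ValidateRestartPolicy(ok)) // <nil>

	// MaximumRetryCount is only valid with "on-failure", so this is rejected.
	bad := container.RestartPolicy{
		Name:              container.RestartPolicyAlways,
		MaximumRetryCount: 3,
	}
	fmt.Println(container.ValidateRestartPolicy(bad))
}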
-type LogMode string - -// Available logging modes -const ( - LogModeUnset LogMode = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers - - // KernelMemory specifies the kernel memory limit (in bytes) for the container. - // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
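A hedged sketch of the Resources and UpdateConfig types above; the NanoCPUs and Memory values are arbitrary examples.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// NanoCPUs is billionths of a CPU; Memory is bytes.
	update := container.UpdateConfig{
		Resources: container.Resources{
			NanoCPUs: 500_000_000,       // half a CPU
			Memory:   256 * 1024 * 1024, // 256 MiB
		},
		RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyUnlessStopped},
	}
	fmt.Printf("cpus=%.2f memory=%d\n", float64(update.NanoCPUs)/1e9, update.Memory)
}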
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - ConsoleSize [2]uint // Initial console size (height,width) - Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} - -// containerID splits "container:" values. It returns the container -// ID or name, and whether an ID/name was found. 
It returns an empty string and -// a "false" if the value does not have a "container:" prefix. Further validation -// of the returned, including checking if the value is empty, should be handled -// by the caller. -func containerID(val string) (idOrName string, ok bool) { - k, v, hasSep := strings.Cut(val, ":") - if !hasSep || k != "container" { - return "", false - } - return v, true -} - -// validContainer checks if the given value is a "container:" mode with -// a non-empty name/ID. -func validContainer(val string) bool { - id, ok := containerID(val) - return ok && id != "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index a14fe8d1af..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5bb..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
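Pulling the HostConfig fields above together, a minimal illustrative configuration (host paths and ports are invented; nat comes from the github.com/docker/go-connections/nat package this file already imports).

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	hc := container.HostConfig{
		Binds:       []string{"/srv/config:/etc/app:ro"},
		NetworkMode: "bridge",
		AutoRemove:  true,
		PortBindings: nat.PortMap{
			"80/tcp": []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "8080"}},
		},
		RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyDisabled},
	}
	fmt.Println(hc.NetworkMode.NetworkName(), hc.PortBindings)
}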
-func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go deleted file mode 100644 index ab56d4eed8..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go +++ /dev/null @@ -1,12 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitExitError container waiting error, if any -// swagger:model WaitExitError -type WaitExitError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go deleted file mode 100644 index 84fc6afddc..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitResponse ContainerWaitResponse -// -// OK response to ContainerWait operation -// swagger:model WaitResponse -type WaitResponse struct { - - // error - Error *WaitExitError `json:"Error,omitempty"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99c..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9e..0000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. 
- // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd5..0000000000 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go deleted file mode 100644 index 6dbcd92235..0000000000 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ /dev/null @@ -1,127 +0,0 @@ -package events // import "github.com/docker/docker/api/types/events" - -// Type is used for event-types. -type Type string - -// List of known event types. -const ( - BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. - ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. - ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. - DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. - ImageEventType Type = "image" // ImageEventType is the event type that images generate. - NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. - NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. - PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. - SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. - ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. - VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. -) - -// Action is used for event-actions. -type Action string - -const ( - ActionCreate Action = "create" - ActionStart Action = "start" - ActionRestart Action = "restart" - ActionStop Action = "stop" - ActionCheckpoint Action = "checkpoint" - ActionPause Action = "pause" - ActionUnPause Action = "unpause" - ActionAttach Action = "attach" - ActionDetach Action = "detach" - ActionResize Action = "resize" - ActionUpdate Action = "update" - ActionRename Action = "rename" - ActionKill Action = "kill" - ActionDie Action = "die" - ActionOOM Action = "oom" - ActionDestroy Action = "destroy" - ActionRemove Action = "remove" - ActionCommit Action = "commit" - ActionTop Action = "top" - ActionCopy Action = "copy" - ActionArchivePath Action = "archive-path" - ActionExtractToDir Action = "extract-to-dir" - ActionExport Action = "export" - ActionImport Action = "import" - ActionSave Action = "save" - ActionLoad Action = "load" - ActionTag Action = "tag" - ActionUnTag Action = "untag" - ActionPush Action = "push" - ActionPull Action = "pull" - ActionPrune Action = "prune" - ActionDelete Action = "delete" - ActionEnable Action = "enable" - ActionDisable Action = "disable" - ActionConnect Action = "connect" - ActionDisconnect Action = "disconnect" - ActionReload Action = "reload" - ActionMount Action = "mount" - ActionUnmount Action = "unmount" - - // ActionExecCreate is the prefix used for exec_create events. 
These - // event-actions are commonly followed by a colon and space (": "), - // and the command that's defined for the exec, for example: - // - // exec_create: /bin/sh -c 'echo hello' - // - // This is far from ideal; it's a compromise to allow filtering and - // to preserve backward-compatibility. - ActionExecCreate Action = "exec_create" - // ActionExecStart is the prefix used for exec_create events. These - // event-actions are commonly followed by a colon and space (": "), - // and the command that's defined for the exec, for example: - // - // exec_start: /bin/sh -c 'echo hello' - // - // This is far from ideal; it's a compromise to allow filtering and - // to preserve backward-compatibility. - ActionExecStart Action = "exec_start" - ActionExecDie Action = "exec_die" - ActionExecDetach Action = "exec_detach" - - // ActionHealthStatus is the prefix to use for health_status events. - // - // Health-status events can either have a pre-defined status, in which - // case the "health_status" action is followed by a colon, or can be - // "free-form", in which case they're followed by the output of the - // health-check output. - // - // This is far form ideal, and a compromise to allow filtering, and - // to preserve backward-compatibility. - ActionHealthStatus Action = "health_status" - ActionHealthStatusRunning Action = "health_status: running" - ActionHealthStatusHealthy Action = "health_status: healthy" - ActionHealthStatusUnhealthy Action = "health_status: unhealthy" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - - Type Type - Action Action - Actor Actor - // Engine events are local scope. Cluster events are swarm scope. - Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go deleted file mode 100644 index f52f694408..0000000000 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -package filters - -import "fmt" - -// invalidFilter indicates that the provided filter or its value is invalid -type invalidFilter struct { - Filter string - Value []string -} - -func (e invalidFilter) Error() string { - msg := "invalid filter" - if e.Filter != "" { - msg += " '" + e.Filter - if e.Value != nil { - msg = fmt.Sprintf("%s=%s", msg, e.Value) - } - msg += "'" - } - return msg -} - -// InvalidParameter marks this error as ErrInvalidParameter -func (e invalidFilter) InvalidParameter() {} - -// unreachableCode is an error indicating that the code path was not expected to be reached. 
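A sketch of how the event types and actions above are typically consumed; the actor ID and attributes are fabricated for illustration.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/events"
)

func main() {
	msg := events.Message{
		Type:   events.ContainerEventType,
		Action: events.ActionDie,
		Actor: events.Actor{
			ID:         "4f66ad9b4c2b",
			Attributes: map[string]string{"image": "busybox", "exitCode": "1"},
		},
		Scope: "local",
	}

	// Consumers switch on Type and Action rather than the deprecated
	// Status/ID/From fields.
	if msg.Type == events.ContainerEventType && msg.Action == events.ActionDie {
		fmt.Printf("container %s exited (code %s)\n",
			msg.Actor.ID, msg.Actor.Attributes["exitCode"])
	}
}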
-type unreachableCode struct { - Filter string - Value []string -} - -// System marks this error as ErrSystem -func (e unreachableCode) System() {} - -func (e unreachableCode) Error() string { - return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 0c39ab5f18..0000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, &invalidFilter{} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testK, testV, hasValue := strings.Cut(value, "=") - - v, ok := sources[testK] - if !ok { - return false - } - if hasValue && testV != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// GetBoolOrDefault returns a boolean value of the key if the key is present -// and is intepretable as a boolean value. Otherwise the default value is returned. -// Error is not nil only if the filter values are not valid boolean or are conflicting. 
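The filters.Args API above is easiest to read with a concrete example; the filter keys and values below are illustrative only.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Args is a set of values per key.
	args := filters.NewArgs(
		filters.Arg("label", "com.example.role=frontend"),
		filters.Arg("status", "running"),
	)
	args.Add("status", "paused")

	fmt.Println(args.Len())         // 2 (keys: "label", "status")
	fmt.Println(args.Get("status")) // [running paused], order unspecified

	// ToJSON produces the value sent in the "filters" query parameter.
	js, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(js)
}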
-func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] - - if !ok { - return defaultValue, nil - } - - if len(fieldValues) == 0 { - return defaultValue, &invalidFilter{key, nil} - } - - isFalse := fieldValues["0"] || fieldValues["false"] - isTrue := fieldValues["1"] || fieldValues["true"] - - conflicting := isFalse && isTrue - invalid := !isFalse && !isTrue - - if conflicting || invalid { - return defaultValue, &invalidFilter{key, args.Get(key)} - } else if isFalse { - return false, nil - } else if isTrue { - return true, nil - } - - // This code shouldn't be reached. - return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return &invalidFilter{name, nil} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. 
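And a companion sketch for Validate and GetBoolOrDefault; the accepted-keys map mirrors what list endpoints typically pass, but is invented here.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("dangling", "true"))

	// Reject unknown filter keys up front, as list endpoints typically do.
	accepted := map[string]bool{"dangling": true, "label": true}
	if err := args.Validate(accepted); err != nil {
		fmt.Println("invalid filter:", err)
		return
	}

	// GetBoolOrDefault accepts "1"/"true" and "0"/"false"; conflicting or
	// non-boolean values return an error alongside the default.
	dangling, err := args.GetBoolOrDefault("dangling", false)
	fmt.Println(dangling, err) // true <nil>
}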
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index ce3deb331c..0000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about the storage driver used to store the container's and -// image's filesystem. -// -// swagger:model GraphDriverData -type GraphDriverData struct { - - // Low-level storage metadata, provided as key/value pairs. - // - // This information is driver-specific, and depends on the storage-driver - // in use, and should be used for informational purposes only. - // - // Required: true - Data map[string]string `json:"Data"` - - // Name of the storage driver. - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b1..0000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index e302bb0aeb..0000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,36 +0,0 @@ -package image // import "github.com/docker/docker/api/types/image" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go deleted file mode 100644 index 3cefecb0da..0000000000 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ /dev/null @@ -1,9 +0,0 @@ -package image - -import ocispec "github.com/opencontainers/image-spec/specs-go/v1" - -// GetImageOpts holds parameters to inspect an image. -type GetImageOpts struct { - Platform *ocispec.Platform - Details bool -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8e..0000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index 076848ad5f..0000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,89 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // Number of containers using this image. Includes both stopped and running - // containers. - // - // This size is not calculated by default, and depends on which API endpoint - // is used. `-1` indicates that the value has not been set / calculated. - // - // Required: true - Containers int64 `json:"Containers"` - - // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). - // - // Required: true - Created int64 `json:"Created"` - - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - // - // Required: true - ID string `json:"Id"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // ID of the parent image. 
- // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - // - // Required: true - ParentID string `json:"ParentId"` - - // List of content-addressable digests of locally available image manifests - // that the image is referenced from. Multiple manifests can refer to the - // same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - // - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // List of image names/tags in the local image cache that reference this - // image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - // - // Required: true - RepoTags []string `json:"RepoTags"` - - // Total size of image layers that are shared between this image and other - // images. - // - // This size is not calculated by default. `-1` indicates that the value - // has not been set / calculated. - // - // Required: true - SharedSize int64 `json:"SharedSize"` - - // Total size of the image including all layers it is composed of. - // - // Required: true - Size int64 `json:"Size"` - - // Total size of the image including all layers it is composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 57edf2ef18..0000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,145 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" - // TypeCluster is the type for Swarm Cluster Volumes. - TypeCluster Type = "cluster" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` - ClusterOptions *ClusterOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. 
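A minimal sketch of the Mount type above as a read-only bind mount (paths are illustrative).

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// Source is a host path for TypeBind and a volume name for TypeVolume.
	m := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/srv/config",
		Target:   "/etc/app",
		ReadOnly: true,
	}
	fmt.Printf("%s %s -> %s (ro=%v)\n", m.Type, m.Source, m.Target, m.ReadOnly)
}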
-type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` - CreateMountpoint bool `json:",omitempty"` - // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive - // (unless NonRecursive is set to true in conjunction). - ReadOnlyNonRecursive bool `json:",omitempty"` - // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. - ReadOnlyForceRecursive bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. -} - -// ClusterOptions specifies options for a Cluster volume. 
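For context, a minimal sketch of how callers typically compose the mount types being removed here, using the vendored import path from the deleted file; the host and container paths and the tmpfs size are hypothetical example values, not anything prescribed by this patch.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// Hypothetical read-only bind mount with private propagation.
	bind := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/srv/app-config", // hypothetical host path
		Target:   "/etc/app",        // hypothetical container path
		ReadOnly: true,
		BindOptions: &mount.BindOptions{
			Propagation: mount.PropagationRPrivate,
		},
	}

	// Hypothetical tmpfs mount; Source stays empty for tmpfs mounts.
	scratch := mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/run/app",
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024, // 64 MiB
			Mode:      os.FileMode(0o1777),
		},
	}

	fmt.Println(bind.Type, scratch.Type)
}
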
-type ClusterOptions struct { - // intentionally empty -} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go deleted file mode 100644 index 491e7bc360..0000000000 --- a/vendor/github.com/docker/docker/api/types/network/ipam.go +++ /dev/null @@ -1,132 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "net/netip" - - "github.com/docker/docker/internal/multierror" -) - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -type ipFamily string - -const ( - ip4 ipFamily = "IPv4" - ip6 ipFamily = "IPv6" -) - -func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { - if ipam == nil { - return nil - } - - var errs []error - for _, cfg := range ipam.Config { - subnet, err := netip.ParsePrefix(cfg.Subnet) - if err != nil { - errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) - continue - } - subnetFamily := ip4 - if subnet.Addr().Is6() { - subnetFamily = ip6 - } - - if subnet != subnet.Masked() { - errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) - } - - if !enableIPv6 && subnetFamily == ip6 { - errs = append(errs, fmt.Errorf("invalid subnet %s: IPv6 has not been enabled for this network", subnet)) - } - - if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { - errs = append(errs, ipRangeErrs...) 
- } - - if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) - } - - for auxName, aux := range cfg.AuxAddress { - if err := validateAddress(aux, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) - } - } - } - - if err := multierror.Join(errs...); err != nil { - return fmt.Errorf("invalid network config:\n%w", err) - } - - return nil -} - -func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { - if ipRange == "" { - return nil - } - prefix, err := netip.ParsePrefix(ipRange) - if err != nil { - return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} - } - family := ip4 - if prefix.Addr().Is6() { - family = ip6 - } - - if family != subnetFamily { - return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} - } - - var errs []error - if prefix.Bits() < subnet.Bits() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) - } - if prefix != prefix.Masked() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) - } - if !subnet.Overlaps(prefix) { - errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) - } - - return errs -} - -func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { - if address == "" { - return nil - } - addr, err := netip.ParseAddr(address) - if err != nil { - return errors.New("invalid address") - } - family := ip4 - if addr.Is6() { - family = ip6 - } - - if family != subnetFamily { - return fmt.Errorf("parent subnet is an %s block", subnetFamily) - } - if !subnet.Contains(addr) { - return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 6c5ef9ca40..0000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,111 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" -) - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
- return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9ab..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. - ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. 
-// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 5699010675..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2eb..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e87..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b5..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad8..0000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744c..0000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
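An illustrative round-trip through the custom PluginInterfaceType JSON methods shown above, assuming the vendored github.com/docker/docker/api/types package; the "docker.volumedriver/1.0" string is only an example of the "prefix.capability/version" wire form.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// UnmarshalJSON splits a quoted "prefix.capability/version" string.
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	// MarshalJSON reassembles the same quoted string.
	out, err := json.Marshal(&t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
}
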
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go deleted file mode 100644 index 97a924e374..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" -import ( - "encoding/base64" - "encoding/json" - "io" - "strings" - - "github.com/pkg/errors" -) - -// AuthHeader is the name of the header used to send encoded registry -// authorization credentials for registry operations (push/pull). -const AuthHeader = "X-Registry-Auth" - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// EncodeAuthConfig serializes the auth configuration as a base64url encoded -// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. -// -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", errInvalidParameter{err} - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON -// authentication information as sent through the X-Registry-Auth header. -// -// This function always returns an AuthConfig, even if an error occurs. It is up -// to the caller to decide if authentication is required, and if the error can -// be ignored. -// -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == "" { - return &AuthConfig{}, nil - } - - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - return decodeAuthConfigFromReader(authJSON) -} - -// DecodeAuthConfigBody decodes authentication information as sent as JSON in the -// body of a request. This function is to provide backward compatibility with old -// clients and API versions. Current clients and API versions expect authentication -// to be provided through the X-Registry-Auth header. -// -// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an -// error occurs. 
It is up to the caller to decide if authentication is required, -// and if the error can be ignored. -func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { - return decodeAuthConfigFromReader(rdr) -} - -func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { - authConfig := &AuthConfig{} - if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { - // always return an (empty) AuthConfig to increase compatibility with - // the existing API. - return &AuthConfig{}, invalid(err) - } - return authConfig, nil -} - -func invalid(err error) error { - return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { return e.error } - -func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e40..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 05cb31075f..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,122 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
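A small usage sketch for the registry auth helpers deleted above; the credentials and request URL are placeholders, and the code only demonstrates encoding an AuthConfig into the X-Registry-Auth header and decoding it back.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Hypothetical credentials; EncodeAuthConfig produces the base64url JSON
	// payload expected in the X-Registry-Auth header.
	auth := registry.AuthConfig{
		Username:      "someuser",
		Password:      "s3cret",
		ServerAddress: "https://index.docker.io/v1/",
	}
	encoded, err := registry.EncodeAuthConfig(auth)
	if err != nil {
		panic(err)
	}

	// Placeholder request URL, used only to show where the header goes.
	req, err := http.NewRequest(http.MethodPost, "http://daemon.invalid/images/create", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set(registry.AuthHeader, encoded)

	// DecodeAuthConfig is the inverse; it always returns an AuthConfig value.
	decoded, err := registry.DecodeAuthConfig(encoded)
	fmt.Println(decoded.Username, err)
}
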
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. 
- IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor ocispec.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []ocispec.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1bb..0000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 20daebed14..0000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). 
Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. 
- RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc1..0000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index 5ded7dba8a..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,48 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "strconv" - "time" -) - -// Version represents the internal object version. 
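The StrSlice decoder above accepts either a bare JSON string or a JSON array; a short, self-contained illustration of both forms, using the import path from the deleted file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	// A bare JSON string decodes to a single-element slice.
	var cmd strslice.StrSlice
	if err := json.Unmarshal([]byte(`"/bin/sh"`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [/bin/sh]

	// A JSON array decodes element by element.
	var entry strslice.StrSlice
	if err := json.Unmarshal([]byte(`["/bin/sh", "-c", "echo hello"]`), &entry); err != nil {
		panic(err)
	}
	fmt.Println(entry) // [/bin/sh -c echo hello]
}
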
-type Version struct { - Index uint64 `json:",omitempty"` -} - -// String implements fmt.Stringer interface. -func (v Version) String() string { - return strconv.FormatUint(v.Index, 10) -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce6..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. -type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index af5e1c0bc2..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,80 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. 
-type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d1..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. 
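A hedged sketch of a swarm.ContainerSpec assembled from the fields shown above; the image name, environment value, nameserver, and mount paths are placeholders chosen for illustration only.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// All concrete values here (image, env, paths, nameserver) are hypothetical.
	spec := swarm.ContainerSpec{
		Image:   "nginx:alpine",
		Command: []string{"nginx", "-g", "daemon off;"},
		Env:     []string{"LOG_LEVEL=info"},
		DNSConfig: &swarm.DNSConfig{
			Nameservers: []string{"10.0.0.2"},
			Search:      []string{"example.internal"},
		},
		Mounts: []mount.Mount{{
			Type:   mount.TypeVolume,
			Source: "app-data",
			Target: "/var/lib/app",
		}},
	}
	fmt.Println(spec.Image, len(spec.Mounts))
}
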
-type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index bb98d5eedc..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,139 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. 
- Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` - CSIInfo []NodeCSIInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// NodeCSIInfo represents information about a CSI plugin available on the node -type NodeCSIInfo struct { - // PluginName is the name of the CSI plugin. - PluginName string `json:",omitempty"` - // NodeID is the ID of the node as reported by the CSI plugin. This is - // different from the swarm node ID. - NodeID string `json:",omitempty"` - // MaxVolumesPerNode is the maximum number of volumes that may be published - // to this node - MaxVolumesPerNode int64 `json:",omitempty"` - // AccessibleTopology indicates the location of this node in the CSI - // plugin's topology - AccessibleTopology *Topology `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. 
-type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) - -// Topology defines the CSI topology of this node. This type is a duplicate of -// github.com/docker/docker/api/types.Topology. Because the type definition -// is so simple and to avoid complicated structure or circular imports, we just -// duplicate it here. See that type for full documentation -type Topology struct { - Segments map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403ccf..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 292bd7afc8..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 32aaf0d519..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,808 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -package runtime - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{0} -} -func (m *PluginSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginSpec.Merge(m, src) -} -func (m *PluginSpec) XXX_Size() int { - return m.Size() -} -func (m *PluginSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PluginSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginSpec proto.InternalMessageInfo - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{1} -} -func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginPrivilege) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginPrivilege.Merge(m, src) -} -func (m *PluginPrivilege) XXX_Size() int { - return m.Size() -} -func (m *PluginPrivilege) XXX_DiscardUnknown() { - xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } - -var fileDescriptor_22a625af4bc1cc87 = []byte{ - // 225 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, - 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, - 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, - 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, - 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, - 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, - 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, - 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, - 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, - 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, - 0x00, -} - -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Privileges) > 0 { - for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlugin(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Remote) > 0 { - i -= len(m.Remote) - copy(dAtA[i:], m.Remote) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - offset -= sovPlugin(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlugin(x uint64) (n int) { - return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlugin - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlugin - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index e311b36ba2..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec981..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. 
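A small, hypothetical round trip through the generated plugin runtime code shown above (the gogo-generated Marshal/Unmarshal methods from the removed plugin.pb.go); the plugin name, remote reference, and privilege values are placeholders.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	spec := runtime.PluginSpec{
		Name:   "org/sample-plugin",        // hypothetical plugin name
		Remote: "org/sample-plugin:latest", // hypothetical remote reference
		Privileges: []*runtime.PluginPrivilege{
			{Name: "network", Description: "permissions to access a network", Value: []string{"host"}},
		},
		Env: []string{"DEBUG=1"},
	}

	// Encode with the generated protobuf marshaler...
	data, err := spec.Marshal()
	if err != nil {
		panic(err)
	}

	// ...and decode it back into a fresh value.
	var decoded runtime.PluginSpec
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetName(), decoded.GetRemote(), decoded.GetEnv())
}
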
- Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 6eb452d24d..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computation and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. 
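For the secret types above, a hedged sketch of a SecretSpec plus the reference a service task would carry; the secret name, ID, payload, and file mode are illustrative only.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"},
		Data:        []byte("s3cret"), // hypothetical payload; real secrets are never hard-coded
	}

	ref := swarm.SecretReference{
		File: &swarm.SecretReferenceFileTarget{
			Name: "db-password", // filename exposed to the container (conventionally under /run/secrets)
			UID:  "0",
			GID:  "0",
			Mode: 0o444,
		},
		SecretID:   "hypothetical-secret-id",
		SecretName: spec.Name,
	}
	fmt.Printf("%+v %+v\n", spec, ref)
}
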
-type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is the a type of Service which executes a defined Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks that MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. 
- Order string -} - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 3eae4b9b29..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,237 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. 
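A hedged sketch of how the service types above fit together: a replicated service with a start-first rolling-update policy. The image and numeric values are placeholders; the struct literal only exercises fields defined in the removed service.go.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "example-web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     1,                // one task updated per iteration
			Delay:           10 * time.Second, // wait between iterations
			FailureAction:   swarm.UpdateFailureActionRollback,
			Monitor:         30 * time.Second, // failures within this window count against the ratio
			MaxFailureRatio: 0.2,
			Order:           swarm.UpdateOrderStartFirst,
		},
	}
	fmt.Printf("%+v\n", spec)
}
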
- TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. 
- URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Status provides information about the current swarm status and role, -// obtained from the "Swarm" header in the API response. -type Status struct { - // NodeState represents the state of the node. - NodeState LocalNodeState - - // ControlAvailable indicates if the node is a swarm manager. - ControlAvailable bool -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index ad3eeca0b7..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,225 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. 
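To illustrate the cluster-level types above, a minimal, hypothetical swarm.InitRequest; the addresses use documentation ranges and the raft values mirror common defaults rather than anything mandated by the API.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",
		AdvertiseAddr: "192.0.2.10:2377", // documentation address, for illustration
		Spec: swarm.Spec{
			Annotations: swarm.Annotations{Name: "default"},
			Raft: swarm.RaftConfig{
				SnapshotInterval: 10000, // log entries between snapshots
				ElectionTick:     10,    // must be greater than HeartbeatTick
				HeartbeatTick:    1,
			},
		},
		AutoLockManagers: true, // encrypt manager TLS keys and raft data at rest
	}
	fmt.Printf("%+v\n", req)
}
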
-type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` - - // Volumes is the list of VolumeAttachments for this task. It specifies - // which particular volumes are to be used by this particular task, and - // fulfilling what mounts in the spec. - Volumes []VolumeAttachment -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. 
-type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. 
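A brief sketch of the resource types above; NanoCPUs is expressed in billionths of a CPU, so the hypothetical values below reserve half a CPU and cap the task at one CPU, 128 MiB, and 100 PIDs.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.ResourceRequirements{
		Reservations: &swarm.Resources{
			NanoCPUs:    500_000_000, // 0.5 CPU
			MemoryBytes: 64 * 1024 * 1024,
		},
		Limits: &swarm.Limit{
			NanoCPUs:    1_000_000_000, // 1 CPU
			MemoryBytes: 128 * 1024 * 1024,
			Pids:        100,
		},
	}
	fmt.Printf("%+v\n", req)
}
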
-type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} - -// VolumeAttachment contains the associating a Volume to a Task. -type VolumeAttachment struct { - // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. - ID string `json:",omitempty"` - - // Source, together with Target, indicates the Mount, as specified in the - // ContainerSpec, that this volume fulfills. - Source string `json:",omitempty"` - - // Target, together with Source, indicates the Mount, as specified - // in the ContainerSpec, that this volume fulfills. - Target string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go deleted file mode 100644 index 09dbbd0926..0000000000 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ /dev/null @@ -1,116 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" -) - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
- CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - CDISpecDirs []string - - // Legacy API fields for older API versions. - legacyFields - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -type legacyFields struct { - ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by [Info] struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// NetworkAddressPool is a temp struct used by [Info] struct. -type NetworkAddressPool struct { - Base string - Size int -} diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go deleted file mode 100644 index 83433acf92..0000000000 --- a/vendor/github.com/docker/docker/api/types/system/runtime.go +++ /dev/null @@ -1,14 +0,0 @@ -package system - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. 
- - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go deleted file mode 100644 index edff3eb1ac..0000000000 --- a/vendor/github.com/docker/docker/api/types/system/security_opts.go +++ /dev/null @@ -1,48 +0,0 @@ -package system - -import ( - "errors" - "fmt" - "strings" -) - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a -// type-safe [SecurityOpt]. -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// KeyValue holds a key/value pair. -type KeyValue struct { - Key, Value string -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go deleted file mode 100644 index cab5c32e3f..0000000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ /dev/null @@ -1,131 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
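The DecodeSecurityOptions helper above turns the daemon's flat security-option strings into structured values; a small usage sketch with made-up option strings follows.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/system"
)

func main() {
	opts, err := system.DecodeSecurityOptions([]string{
		"name=seccomp,profile=builtin", // named option with a key/value pair
		"name=apparmor",                // named option with no extra keys
	})
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options)
	}
}
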
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - if _, _, err := parseTimestamp(value); err != nil { - return "", fmt.Errorf("failed to parse value as time or duration: %q", value) - } - return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has -// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())). -// If the incoming nanosecond portion is longer than 9 digits it is truncated. -// The expectation is that the seconds and nanoseconds will be used to create a -// time variable. 
For example: -// -// seconds, nanoseconds, _ := ParseTimestamp("1136073600.000000001",0) -// since := time.Unix(seconds, nanoseconds) -// -// returns seconds as defaultSeconds if value == "" -func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) { - if value == "" { - return defaultSeconds, 0, nil - } - return parseTimestamp(value) -} - -func parseTimestamp(value string) (sec int64, nsec int64, err error) { - s, n, ok := strings.Cut(value, ".") - sec, err = strconv.ParseInt(s, 10, 64) - if err != nil { - return sec, 0, err - } - if !ok { - return sec, 0, nil - } - nsec, err = strconv.ParseInt(n, 10, 64) - if err != nil { - return sec, nsec, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n)))) - return sec, nsec, nil -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index a213f49192..0000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,633 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "io" - "os" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" -) - -const ( - // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams - MediaTypeRawStream = "application/vnd.docker.raw-stream" - - // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams - MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" -) - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. 
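A short sketch of the timestamp helpers above: GetTimestamp treats a duration as an offset back from the reference time, and ParseTimestamps splits a "<seconds>.<nanoseconds>" value; the reference time and inputs below are arbitrary.

package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Date(2024, 2, 22, 12, 0, 0, 0, time.UTC)

	// A duration is interpreted relative to the reference time.
	ts, err := timetypes.GetTimestamp("2h", ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts) // Unix seconds for ref minus two hours

	// A value already in "<sec>.<nsec>" form can be split back out.
	sec, nsec, err := timetypes.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(sec, nsec).UTC())
}
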
This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - Created string - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - Container string - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - ContainerConfig *container.Config - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. - GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion - - // SwarmStatus provides information about the current swarm status of the - // engine, obtained from the "Swarm" header in the API response. - // - // It can be a nil struct if the API version does not provide this header - // in the ping response, or if an error occurred, in which case the client - // should use other ways to get the current swarm status, such as the /swarm - // endpoint. - SwarmStatus *swarm.Status -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. 
Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. 
-type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. - Name string `json:",omitempty"` - - // Source is the source location of the mount. - // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. 
- ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. If empty, - // all object types are returned. 
- Types []DiskUsageObject -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*volume.Volume - BuildCache []*BuildCache - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. -} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:" Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. - InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used. 
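// An illustrative, assumed usage sketch of DiskUsageOptions above
// (hypothetical example file). It assumes the client's DiskUsage method on
// this API vintage accepts a DiskUsageOptions value; error handling is
// abbreviated.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Restrict the GET /system/df query to image and volume usage; leaving
	// Types empty would return every object type.
	du, err := cli.DiskUsage(context.Background(), types.DiskUsageOptions{
		Types: []types.DiskUsageObject{types.ImageObject, types.VolumeObject},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("layers size:", du.LayersSize, "volumes listed:", len(du.Volumes))
}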
- LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go deleted file mode 100644 index 3d0174b756..0000000000 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ /dev/null @@ -1,72 +0,0 @@ -package types - -import ( - "github.com/docker/docker/api/types/checkpoint" - "github.com/docker/docker/api/types/system" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container. -// -// Deprecated: use [checkpoint.CreateOptions]. -type CheckpointCreateOptions = checkpoint.CreateOptions - -// CheckpointListOptions holds parameters to list checkpoints for a container -// -// Deprecated: use [checkpoint.ListOptions]. -type CheckpointListOptions = checkpoint.ListOptions - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -// -// Deprecated: use [checkpoint.DeleteOptions]. -type CheckpointDeleteOptions = checkpoint.DeleteOptions - -// Checkpoint represents the details of a checkpoint when listing endpoints. -// -// Deprecated: use [checkpoint.Summary]. -type Checkpoint = checkpoint.Summary - -// Info contains response of Engine API: -// GET "/info" -// -// Deprecated: use [system.Info]. -type Info = system.Info - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -// -// Deprecated: use [system.Commit]. -type Commit = system.Commit - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by [system.Info] struct -// -// Deprecated: use [system.PluginsInfo]. -type PluginsInfo = system.PluginsInfo - -// NetworkAddressPool is a temp struct used by [system.Info] struct. -// -// Deprecated: use [system.NetworkAddressPool]. -type NetworkAddressPool = system.NetworkAddressPool - -// Runtime describes an OCI runtime. -// -// Deprecated: use [system.Runtime]. -type Runtime = system.Runtime - -// SecurityOpt contains the name and options of a security option. -// -// Deprecated: use [system.SecurityOpt]. -type SecurityOpt = system.SecurityOpt - -// KeyValue holds a key/value pair. -// -// Deprecated: use [system.KeyValue]. -type KeyValue = system.KeyValue - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// [system.SecurityOpt]. -// -// Deprecated: use [system.DecodeSecurityOptions]. -func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { - return system.DecodeSecurityOptions(opts) -} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb0..0000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. 
Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 621725a36d..0000000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,65 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - if v1 == v2 { - return 0 - } - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - maxVer := len(currTab) - if len(otherTab) > maxVer { - maxVer = len(otherTab) - } - for i := 0; i < maxVer; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go deleted file mode 100644 index 55fc5d3899..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ /dev/null @@ -1,420 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/types/swarm" -) - -// ClusterVolume contains options and information specific to, and only present -// on, Swarm CSI cluster volumes. -type ClusterVolume struct { - // ID is the Swarm ID of the volume. Because cluster volumes are Swarm - // objects, they have an ID, unlike non-cluster volumes, which only have a - // Name. This ID can be used to refer to the cluster volume. - ID string - - // Meta is the swarm metadata about this volume. - swarm.Meta - - // Spec is the cluster-specific options from which this volume is derived. 
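// An illustrative, assumed usage sketch of the versions helpers above
// (hypothetical example file): the package compares dotted API version
// strings segment by segment as integers, which is what the client relies
// on when negotiating an API version.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.24", "1.44"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.44", "1.24")) // true
	// Missing segments compare as zero, so "1.40" and "1.40.0" are equal.
	fmt.Println(versions.Equal("1.40", "1.40.0")) // true
}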
- Spec ClusterVolumeSpec - - // PublishStatus contains the status of the volume as it pertains to its - // publishing on Nodes. - PublishStatus []*PublishStatus `json:",omitempty"` - - // Info is information about the global status of the volume. - Info *Info `json:",omitempty"` -} - -// ClusterVolumeSpec contains the spec used to create this volume. -type ClusterVolumeSpec struct { - // Group defines the volume group of this volume. Volumes belonging to the - // same group can be referred to by group name when creating Services. - // Referring to a volume by group instructs swarm to treat volumes in that - // group interchangeably for the purpose of scheduling. Volumes with an - // empty string for a group technically all belong to the same, emptystring - // group. - Group string `json:",omitempty"` - - // AccessMode defines how the volume is used by tasks. - AccessMode *AccessMode `json:",omitempty"` - - // AccessibilityRequirements specifies where in the cluster a volume must - // be accessible from. - // - // This field must be empty if the plugin does not support - // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the - // plugin does not support it, volume will not be created. - // - // If AccessibilityRequirements is empty, but the plugin does support - // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire - // cluster is a valid target for the volume. - AccessibilityRequirements *TopologyRequirement `json:",omitempty"` - - // CapacityRange defines the desired capacity that the volume should be - // created with. If nil, the plugin will decide the capacity. - CapacityRange *CapacityRange `json:",omitempty"` - - // Secrets defines Swarm Secrets that are passed to the CSI storage plugin - // when operating on this volume. - Secrets []Secret `json:",omitempty"` - - // Availability is the Volume's desired availability. Analogous to Node - // Availability, this allows the user to take volumes offline in order to - // update or delete them. - Availability Availability `json:",omitempty"` -} - -// Availability specifies the availability of the volume. -type Availability string - -const ( - // AvailabilityActive indicates that the volume is active and fully - // schedulable on the cluster. - AvailabilityActive Availability = "active" - - // AvailabilityPause indicates that no new workloads should use the - // volume, but existing workloads can continue to use it. - AvailabilityPause Availability = "pause" - - // AvailabilityDrain indicates that all workloads using this volume - // should be rescheduled, and the volume unpublished from all nodes. - AvailabilityDrain Availability = "drain" -) - -// AccessMode defines the access mode of a volume. -type AccessMode struct { - // Scope defines the set of nodes this volume can be used on at one time. - Scope Scope `json:",omitempty"` - - // Sharing defines the number and way that different tasks can use this - // volume at one time. - Sharing SharingMode `json:",omitempty"` - - // MountVolume defines options for using this volume as a Mount-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - MountVolume *TypeMount `json:",omitempty"` - - // BlockVolume defines options for using this volume as a Block-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - BlockVolume *TypeBlock `json:",omitempty"` -} - -// Scope defines the Scope of a Cluster Volume. This is how many nodes a -// Volume can be accessed simultaneously on. 
-type Scope string - -const ( - // ScopeSingleNode indicates the volume can be used on one node at a - // time. - ScopeSingleNode Scope = "single" - - // ScopeMultiNode indicates the volume can be used on many nodes at - // the same time. - ScopeMultiNode Scope = "multi" -) - -// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a -// Volume at the same time can use it. -type SharingMode string - -const ( - // SharingNone indicates that only one Task may use the Volume at a - // time. - SharingNone SharingMode = "none" - - // SharingReadOnly indicates that the Volume may be shared by any - // number of Tasks, but they must be read-only. - SharingReadOnly SharingMode = "readonly" - - // SharingOneWriter indicates that the Volume may be shared by any - // number of Tasks, but all after the first must be read-only. - SharingOneWriter SharingMode = "onewriter" - - // SharingAll means that the Volume may be shared by any number of - // Tasks, as readers or writers. - SharingAll SharingMode = "all" -) - -// TypeBlock defines options for using a volume as a block-type volume. -// -// Intentionally empty. -type TypeBlock struct{} - -// TypeMount contains options for using a volume as a Mount-type -// volume. -type TypeMount struct { - // FsType specifies the filesystem type for the mount volume. Optional. - FsType string `json:",omitempty"` - - // MountFlags defines flags to pass when mounting the volume. Optional. - MountFlags []string `json:",omitempty"` -} - -// TopologyRequirement expresses the user's requirements for a volume's -// accessible topology. -type TopologyRequirement struct { - // Requisite specifies a list of Topologies, at least one of which the - // volume must be accessible from. - // - // Taken verbatim from the CSI Spec: - // - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 - // If x==n, then the SP MUST make the provisioned volume available to - // all topologies from the list of requisite topologies. If it is - // unable to do so, the SP MUST fail the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and both "zone" "Z2" and "zone" "Z3". - // - // If xn, then the SP MUST make the provisioned volume available from - // all topologies from the list of requisite topologies and MAY choose - // the remaining x-n unique topologies from the list of all possible - // topologies. If it is unable to do so, the SP MUST fail the - // CreateVolume call. 
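// An illustrative, assumed sketch of a ClusterVolumeSpec built from the types
// above (hypothetical example file; the group name and ext4 filesystem type
// are made-up values). Exactly one of MountVolume or BlockVolume is set.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	spec := volume.ClusterVolumeSpec{
		Group: "dbdata",
		AccessMode: &volume.AccessMode{
			Scope:       volume.ScopeSingleNode,
			Sharing:     volume.SharingNone,
			MountVolume: &volume.TypeMount{FsType: "ext4"},
		},
		Availability: volume.AvailabilityActive,
	}
	fmt.Printf("%+v\n", spec)
}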
- // For example, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2" and the SP may select the second zone - // independently, e.g. "R1/Z4". - Requisite []Topology `json:",omitempty"` - - // Preferred is a list of Topologies that the volume should attempt to be - // provisioned in. - // - // Taken from the CSI spec: - // - // Specifies the list of topologies the CO would prefer the volume to - // be provisioned in. - // - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // An SP MUST attempt to make the provisioned volume available using - // the preferred topologies in order from first to last. - // - // If requisite is specified, all topologies in preferred list MUST - // also be present in the list of requisite topologies. - // - // If the SP is unable to to make the provisioned volume available - // from any of the preferred topologies, the SP MAY choose a topology - // from the list of requisite topologies. - // If the list of requisite topologies is not specified, then the SP - // MAY choose from the list of all possible topologies. - // If the list of requisite topologies is specified and the SP is - // unable to to make the provisioned volume available from any of the - // requisite topologies it MUST fail the CreateVolume call. - // - // Example 1: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // preferred = - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // available from "zone" "Z3" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. - // - // Example 2: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z2"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from "zone" "Z4" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. If that - // is not possible, the SP may choose between either the "zone" - // "Z3" or "Z5" in the "region" "R1". - // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. 
If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - Preferred []Topology `json:",omitempty"` -} - -// Topology is a map of topological domains to topological segments. -// -// This description is taken verbatim from the CSI Spec: -// -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is REQUIRED. The prefix is OPTIONAL. -// The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -type Topology struct { - Segments map[string]string `json:",omitempty"` -} - -// CapacityRange describes the minimum and maximum capacity a volume should be -// created with -type CapacityRange struct { - // RequiredBytes specifies that a volume must be at least this big. The - // value of 0 indicates an unspecified minimum. - RequiredBytes int64 - - // LimitBytes specifies that a volume must not be bigger than this. The - // value of 0 indicates an unspecified maximum - LimitBytes int64 -} - -// Secret represents a Swarm Secret value that must be passed to the CSI -// storage plugin when operating on this Volume. It represents one key-value -// pair of possibly many. -type Secret struct { - // Key is the name of the key of the key-value pair passed to the plugin. - Key string - - // Secret is the swarm Secret object from which to read data. This can be a - // Secret name or ID. The Secret data is retrieved by Swarm and used as the - // value of the key-value pair passed to the plugin. - Secret string -} - -// PublishState represents the state of a Volume as it pertains to its -// use on a particular Node. -type PublishState string - -const ( - // StatePending indicates that the volume should be published on - // this node, but the call to ControllerPublishVolume has not been - // successfully completed yet and the result recorded by swarmkit. - StatePending PublishState = "pending-publish" - - // StatePublished means the volume is published successfully to the node. 
- StatePublished PublishState = "published" - - // StatePendingNodeUnpublish indicates that the Volume should be - // unpublished on the Node, and we're waiting for confirmation that it has - // done so. After the Node has confirmed that the Volume has been - // unpublished, the state will move to StatePendingUnpublish. - StatePendingNodeUnpublish PublishState = "pending-node-unpublish" - - // StatePendingUnpublish means the volume is still published to the node - // by the controller, awaiting the operation to unpublish it. - StatePendingUnpublish PublishState = "pending-controller-unpublish" -) - -// PublishStatus represents the status of the volume as published to an -// individual node -type PublishStatus struct { - // NodeID is the ID of the swarm node this Volume is published to. - NodeID string `json:",omitempty"` - - // State is the publish state of the volume. - State PublishState `json:",omitempty"` - - // PublishContext is the PublishContext returned by the CSI plugin when - // a volume is published. - PublishContext map[string]string `json:",omitempty"` -} - -// Info contains information about the Volume as a whole as provided by -// the CSI storage plugin. -type Info struct { - // CapacityBytes is the capacity of the volume in bytes. A value of 0 - // indicates that the capacity is unknown. - CapacityBytes int64 `json:",omitempty"` - - // VolumeContext is the context originating from the CSI storage plugin - // when the Volume is created. - VolumeContext map[string]string `json:",omitempty"` - - // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This - // is distinct from the Volume's Swarm ID, which is the ID used by all of - // the Docker Engine to refer to the Volume. If this field is blank, then - // the Volume has not been successfully created yet. - VolumeID string `json:",omitempty"` - - // AccessibleTopolgoy is the topology this volume is actually accessible - // from. - AccessibleTopology []Topology `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go deleted file mode 100644 index 37c41a6096..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/create_options.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateOptions VolumeConfig -// -// Volume configuration -// swagger:model CreateOptions -type CreateOptions struct { - - // cluster volume spec - ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` - - // Name of the volume driver to use. - Driver string `json:"Driver,omitempty"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - DriverOpts map[string]string `json:"DriverOpts,omitempty"` - - // User-defined key/value metadata. - Labels map[string]string `json:"Labels,omitempty"` - - // The new volume's name. If not specified, Docker generates a name. - // - Name string `json:"Name,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go deleted file mode 100644 index ca5192a2a9..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/list_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package volume - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ListResponse VolumeListResponse -// -// Volume list response -// swagger:model ListResponse -type ListResponse struct { - - // List of volumes - Volumes []*Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go deleted file mode 100644 index 8b0dd13899..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ /dev/null @@ -1,8 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -import "github.com/docker/docker/api/types/filters" - -// ListOptions holds parameters to list volumes. -type ListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go deleted file mode 100644 index ea7d555e5b..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume.go +++ /dev/null @@ -1,75 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // cluster volume - ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *UsageData `json:"UsageData,omitempty"` -} - -// UsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model UsageData -type UsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. 
For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go deleted file mode 100644 index f958f80a66..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ /dev/null @@ -1,7 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// UpdateOptions is configuration to update a Volume with. -type UpdateOptions struct { - // Spec is the ClusterVolumeSpec to update the volume to. - Spec *ClusterVolumeSpec `json:"Spec,omitempty"` -} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index 6cb94be263..0000000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. - -For example, to list running containers (the equivalent of `docker ps`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" -) - -func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - defer cli.Close() - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } -} -``` - -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go deleted file mode 100644 index b76bf366bb..0000000000 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// BuildCancel requests the daemon to cancel the ongoing build request. 
-func (cli *Client) BuildCancel(ctx context.Context, id string) error { - query := url.Values{} - query.Set("id", id) - - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index 2b6606236e..0000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - query := url.Values{} - if opts.All { - query.Set("all", "1") - } - query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) - f, err := filters.ToJSON(opts.Filters) - if err != nil { - return nil, errors.Wrap(err, "prune could not marshal filters option") - } - query.Set("filters", f) - - serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - - if err != nil { - return nil, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, errors.Wrap(err, "error retrieving disk usage") - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 9746d288df..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index b968c2b237..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 
8feb1f3f7d..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { - var checkpoints []checkpoint.Summary - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return checkpoints, err - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index c321e5198b..0000000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,415 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ - -# Usage - -You use the library by constructing a client object using [NewClientWithOpts] -and calling methods on it. The client can be configured from environment -variables by passing the [FromEnv] option, or configured manually by passing any -of the other available [Opts]. - -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } - } -*/ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "net/url" - "path" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - "go.opentelemetry.io/otel/trace" -) - -// DummyHost is a hostname used for local communication. -// -// It acts as a valid formatted hostname for local connections (such as "unix://" -// or "npipe://") which do not require a hostname. It should never be resolved, -// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] -// and [RFC 6761, Section 6.3]). -// -// [RFC 7230, Section 5.4] defines that an empty header must be used for such -// cases: -// -// If the authority component is missing or undefined for the target URI, -// then a client MUST send a Host header field with an empty field-value. -// -// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not -// allow an empty header to be used, and requires req.URL.Scheme to be either -// "http" or "https". 
-// -// For further details, refer to: -// -// - https://github.com/docker/engine-api/issues/189 -// - https://github.com/golang/go/issues/13624 -// - https://github.com/golang/go/issues/61076 -// - https://github.com/moby/moby/issues/45935 -// -// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2 -// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3 -// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4 -// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 -const DummyHost = "api.moby.localhost" - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // userAgent is the User-Agent header to use for HTTP requests. It takes - // precedence over User-Agent headers set in customHTTPHeaders, and other - // header variables. When set to an empty string, the User-Agent header - // is removed, and no header is sent. - userAgent *string - // custom HTTP headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool - - // negotiateVersion indicates if the client should automatically negotiate - // the API version to use when making requests. API version negotiation is - // performed on the first request, after which negotiated is set to "true" - // so that subsequent requests do not re-negotiate. - negotiateVersion bool - - // negotiated indicates that API version negotiation took place - negotiated bool - - tp trace.TracerProvider - - // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). - // Store the original transport as the http.Client transport will be wrapped with tracing libs. - baseTransport *http.Transport -} - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// CheckRedirect specifies the policy for dealing with redirect responses. It -// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for -// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise -// returns a [http.ErrUseLastResponse], which is special-cased by http.Client -// to use the last response. -// -// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The client (and by extension API client) can be made to send -// a request like "POST /containers//start" where what would normally be in the -// name section of the URL is empty. This triggers an HTTP 301 from the daemon. -// -// In go 1.8 this 301 is converted to a GET request, and ends up getting -// a 404 from the daemon. This behavior change manifests in the client in that -// before, the 301 was not followed and the client did not generate an error, -// but now results in a message like "Error response from daemon: page not found". 
-func CheckRedirect(_ *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewClientWithOpts initializes a new API client with a default HTTPClient, and -// default API host and version. It also initializes the custom HTTP headers to -// add to each request. -// -// It takes an optional list of [Opt] functional arguments, which are applied in -// the order they're provided, which allows modifying the defaults when creating -// the client. For example, the following initializes a client that configures -// itself with values from environment variables ([FromEnv]), and has automatic -// API version negotiation enabled ([WithAPIVersionNegotiation]). -// -// cli, err := client.NewClientWithOpts( -// client.FromEnv, -// client.WithAPIVersionNegotiation(), -// ) -func NewClientWithOpts(ops ...Opt) (*Client, error) { - hostURL, err := ParseHostURL(DefaultDockerHost) - if err != nil { - return nil, err - } - - client, err := defaultHTTPClient(hostURL) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - client: client, - proto: hostURL.Scheme, - addr: hostURL.Host, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if tr, ok := c.client.Transport.(*http.Transport); ok { - // Store the base transport before we wrap it in tracing libs below - // This is used, as an example, to close idle connections when the client is closed - c.baseTransport = tr - } - - if c.scheme == "" { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - if c.tlsConfig() != nil { - c.scheme = "https" - } else { - c.scheme = "http" - } - } - - c.client.Transport = otelhttp.NewTransport( - c.client.Transport, - otelhttp.WithTracerProvider(c.tp), - otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { - return req.Method + " " + req.URL.Path - }), - ) - - return c, nil -} - -func (cli *Client) tlsConfig() *tls.Config { - if cli.baseTransport == nil { - return nil - } - return cli.baseTransport.TLSClientConfig -} - -func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { - transport := &http.Transport{} - err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) - if err != nil { - return nil, err - } - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if cli.baseTransport != nil { - cli.baseTransport.CloseIdleConnections() - return nil - } - return nil -} - -// getAPIPath returns the versioned request path to call the API. -// It appends the query parameters to the path if they are not empty. 
-func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string - if cli.negotiateVersion && !cli.negotiated { - cli.NegotiateAPIVersion(ctx) - } - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the API -// version. NegotiateAPIVersion downgrades the client's API version to match the -// APIVersion if the ping version is lower than the default version. If the API -// version reported by the server is higher than the maximum version supported -// by the client, it uses the client's maximum version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, or if the -// client did not get a successful ping response, it assumes it is connected with -// an old daemon that does not support API version negotiation, in which case it -// downgrades to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - if !cli.manualOverride { - ping, _ := cli.Ping(ctx) - cli.negotiateAPIVersionPing(ping) - } -} - -// NegotiateAPIVersionPing downgrades the client's API version to match the -// APIVersion in the ping response. If the API version in pingResponse is higher -// than the maximum version supported by the client, it uses the client's maximum -// version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, we assume -// we are connected with an old daemon without API version negotiation support, -// and downgrade to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { - if !cli.manualOverride { - cli.negotiateAPIVersionPing(pingResponse) - } -} - -// negotiateAPIVersionPing queries the API and updates the version to match the -// API version from the ping response. -func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { - // default to the latest version before versioning headers existed - if pingResponse.APIVersion == "" { - pingResponse.APIVersion = "1.24" - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(pingResponse.APIVersion, cli.version) { - cli.version = pingResponse.APIVersion - } - - // Store the results, so that automatic API version negotiation (if enabled) - // won't be performed on the next request. 
- if cli.negotiateVersion { - cli.negotiated = true - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - c := *cli.client - return &c -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - proto, addr, ok := strings.Cut(host, "://") - if !ok || addr == "" { - return nil, errors.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { - if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { - return nil - } - - if cli.baseTransport.TLSClientConfig != nil { - // When using a tls config we don't use the configured dialer but instead a fallback dialer... - // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn - // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. - return nil - } - return cli.baseTransport.DialContext -} - -// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. It is used by -// ["docker dial-stdio"]. -// -// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 -func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { - return func(ctx context.Context) (net.Conn, error) { - if dialFn := cli.dialerFromTransport(); dialFn != nil { - return dialFn(ctx, cli.proto, cli.addr) - } - switch cli.proto { - case "unix": - return net.Dial(cli.proto, cli.addr) - case "npipe": - return sockets.DialPipe(cli.addr, 32*time.Second) - default: - if tlsConfig := cli.tlsConfig(); tlsConfig != nil { - return tls.Dial(cli.proto, cli.addr, tlsConfig) - } - return net.Dial(cli.proto, cli.addr) - } - } -} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go deleted file mode 100644 index 9e366ce20d..0000000000 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import "net/http" - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// -// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], -// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API -// version negotiation by passing the [WithAPIVersionNegotiation] option instead -// of WithVersion. 
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 9fe78ea43a..0000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index 56572d1a27..0000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index f6b1881fc3..0000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new config. 
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 9be7882c3d..0000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} - } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Config{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 565acc6e27..0000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index 24b94e9c18..0000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a config. 
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 1ac2985435..0000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index a3a009050d..0000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,59 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ - "Content-Type": {"text/plain"}, - }) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index d3e597bd7c..0000000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ContainerCommit applies changes to a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index 883be7fa34..0000000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,93 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" -) - -// ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) - if err != nil { - return types.ContainerPathStat{}, err - } - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - defer ensureReaderClosed(response) - if err != nil { - return err - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index 14a2127d88..0000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,86 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "path" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { - var response container.CreateResponse - - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { - return response, err - } - if err := cli.NewVersionError("1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { - return response, err - } - - if hostConfig != nil { - if versions.LessThan(cli.ClientVersion(), "1.25") { - // When using API 1.24 and under, the client is responsible for removing the container - hostConfig.AutoRemove = false - } - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { - // KernelMemory was added in API 1.40, and deprecated in API 1.42 - hostConfig.KernelMemory = 0 - } - if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { - // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize - hostConfig.ConsoleSize = [2]uint{0, 0} - } - } - - query := url.Values{} - if p := formatPlatform(platform); p != "" { - query.Set("platform", p) - } - - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// formatPlatform returns a formatted 
string representing platform (e.g. linux/arm/v7). -// -// Similar to containerd's platforms.Format(), but does allow components to be -// omitted (e.g. pass "architecture" only, without "os": -// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 -func formatPlatform(platform *ocispec.Platform) string { - if platform == nil { - return "" - } - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index c22c819a79..0000000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) { - var changes []container.FilesystemChange - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index 402d1b8394..0000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,66 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { - var response types.IDResponse - - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { - return response, err - } - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ - "Content-Type": {"application/json"}, - }) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index d0c0a5cbad..0000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index d48f0d3a68..0000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. 
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} - } - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 7c9529f1e1..0000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - if signal != "" { - query.Set("signal", signal) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index 127a57cfd8..0000000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 9bdf2b0fa6..0000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5e7271a371..0000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 04383deaaf..0000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport - - if err := cli.NewVersionError("1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index c21de609b0..0000000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 240fdf552b..0000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index a9d4c0c79a..0000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 1e0ad99981..0000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout. 
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index c2e0b15dca..0000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index 3fabb75f32..0000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - return types.ContainerStats{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), - }, nil -} - -// ContainerStatsOneShot gets a single stat entry from a container. 
-// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - query.Set("one-shot", "1") - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - return types.ContainerStats{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), - }, nil -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index 2a43ce2274..0000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,30 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index a5b78999bf..0000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index 1d8f873169..0000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index bf68a5300e..0000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates the resources of a container. -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 2375eb1e80..0000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,104 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. 
This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - resultC := make(chan container.WaitResponse) - errC := make(chan error, 1) - - query := url.Values{} - if condition != "" { - query.Set("condition", string(condition)) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - - body := resp.body - responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(body, responseText) - - var res container.WaitResponse - if err := json.NewDecoder(stream).Decode(&res); err != nil { - // NOTE(nicks): The /wait API does not work well with HTTP proxies. - // At any time, the proxy could cut off the response stream. - // - // But because the HTTP status has already been written, the proxy's - // only option is to write a plaintext error message. - // - // If there's a JSON parsing error, read the real error message - // off the body and send it to the client. - _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. -func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.WaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index ba0d92e9e6..0000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { - var query url.Values - if len(options.Types) > 0 { - query = url.Values{} - for _, t := range options.Types { - query.Add("type", string(t)) - } - } - - serverResp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.DiskUsage{}, err - } - - var du types.DiskUsage - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 
index 0fb156245f..0000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with the full manifest. -func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registry.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} - } - - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - - var headers http.Header - if encodedRegistryAuth != "" { - headers = http.Header{ - registry.AuthHeader: {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) - defer ensureReaderClosed(resp) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go deleted file mode 100644 index 61dd45c1d7..0000000000 --- a/vendor/github.com/docker/docker/client/envvars.go +++ /dev/null @@ -1,90 +0,0 @@ -package client // import "github.com/docker/docker/client" - -const ( - // EnvOverrideHost is the name of the environment variable that can be used - // to override the default host to connect to (DefaultDockerHost). - // - // This env-var is read by FromEnv and WithHostFromEnv and when set to a - // non-empty value, takes precedence over the default host (which is platform - // specific), or any host already set. - EnvOverrideHost = "DOCKER_HOST" - - // EnvOverrideAPIVersion is the name of the environment variable that can - // be used to override the API version to use. Value should be - // formatted as MAJOR.MINOR, for example, "1.19". - // - // This env-var is read by FromEnv and WithVersionFromEnv and when set to a - // non-empty value, takes precedence over API version negotiation. - // - // This environment variable should be used for debugging purposes only, as - // it can set the client to use an incompatible (or invalid) API version. - EnvOverrideAPIVersion = "DOCKER_API_VERSION" - - // EnvOverrideCertPath is the name of the environment variable that can be - // used to specify the directory from which to load the TLS certificates - // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure - // the Client for a TCP connection protected by TLS client authentication. - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection. Refer to EnvTLSVerify below to learn how to - // disable verification for testing purposes. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). - // - // For local access to the API, it is recommended to connect with the daemon - // using the default local socket connection (on Linux), or the named pipe - // (on Windows). 
- // - // If you need to access the API of a remote daemon, consider using an SSH - // (ssh://) connection, which is easier to set up, and requires no additional - // configuration if the host is accessible using ssh. - // - // If you cannot use the alternatives above, and you must expose the API over - // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ - // to learn how to configure the daemon and client to use a TCP connection - // with TLS client authentication. Make sure you know the differences between - // a regular TLS connection and a TLS connection protected by TLS client - // authentication, and verify that the API cannot be accessed by other clients. - EnvOverrideCertPath = "DOCKER_CERT_PATH" - - // EnvTLSVerify is the name of the environment variable that can be used to - // enable or disable TLS certificate verification. When set to a non-empty - // value, TLS certificate verification is enabled, and the client is configured - // to use a TLS connection, using certificates from the default directories - // (within `~/.docker`); refer to EnvOverrideCertPath above for additional - // details. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). - // - // Before setting up your client and daemon to use a TCP connection with TLS - // client authentication, consider using one of the alternatives mentioned - // in EnvOverrideCertPath above. - // - // Disabling TLS certificate verification (for testing purposes) - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection, and it is highly recommended to keep verification - // enabled to prevent machine-in-the-middle attacks. Refer to the documentation - // at https://docs.docker.com/engine/security/protect-access/ and pages linked - // from that page to learn how to configure the daemon and client to use a - // TCP connection with TLS client authentication enabled. - // - // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to - // disable TLS certificate verification. Disabling verification is insecure, - // so should only be done for testing purposes. From the Go documentation - // (https://pkg.go.dev/crypto/tls#Config): - // - // InsecureSkipVerify controls whether a client verifies the server's - // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls - // accepts any certificate presented by the server and any host name in that - // certificate. In this mode, TLS is susceptible to machine-in-the-middle - // attacks unless custom verification is used. This should be used only for - // testing or in combination with VerifyConnection or VerifyPeerCertificate. - EnvTLSVerify = "DOCKER_TLS_VERIFY" -) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index aa8a280532..0000000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,58 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. 
-type errConnectionFailed struct { - host string -} - -// Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) -} - -// IsErrConnectionFailed returns true if the error is caused by connection failed. -func IsErrConnectionFailed(err error) bool { - return errors.As(err, &errConnectionFailed{}) -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. -func IsErrNotFound(err error) bool { - return errdefs.IsNotFound(err) -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() {} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index a9c48a9288..0000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,101 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. 
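The error helpers in this file are what callers use to branch on the class of failure. A hedged sketch, assuming a *client.Client and an illustrative container name:

    import (
        "context"
        "log"

        "github.com/docker/docker/client"
    )

    // inspectOrReport shows how the classification helpers are typically used.
    func inspectOrReport(ctx context.Context, cli *client.Client, name string) {
        ctr, err := cli.ContainerInspect(ctx, name)
        switch {
        case client.IsErrNotFound(err):
            log.Printf("no such container: %s", name)
        case client.IsErrConnectionFailed(err):
            log.Printf("cannot reach the Docker daemon: %v", err)
        case err != nil:
            log.Printf("inspect failed: %v", err)
        default:
            log.Printf("%s is %s", name, ctr.State.Status)
        }
    }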
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index 9e7f6a66c3..0000000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,164 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "context" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" - "go.opentelemetry.io/otel/trace" -) - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - conn, mediaType, err := cli.setupHijackConn(req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.NewHijackedResponse(conn, mediaType), err -} - -// DialHijack returns a hijacked connection with negotiated protocol proto. 
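As a usage note for the Events stream above: the caller consumes both channels and stops by cancelling the context. A sketch under those assumptions (the one-minute timeout and the "type=container" filter are only examples):

    import (
        "context"
        "log"
        "time"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
    )

    // watchContainerEvents streams container events for one minute, then stops
    // by cancelling the context, which closes the stream as documented above.
    func watchContainerEvents(cli *client.Client) error {
        ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
        defer cancel()

        msgs, errs := cli.Events(ctx, types.EventsOptions{
            Filters: filters.NewArgs(filters.Arg("type", "container")),
        })
        for {
            select {
            case msg := <-msgs:
                log.Printf("%s %s %s", msg.Type, msg.Action, msg.Actor.ID)
            case err := <-errs:
                // Includes io.EOF when the stream ends and ctx.Err() on cancel.
                return err
            }
        }
    }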
-func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - conn, _, err := cli.setupHijackConn(req, proto) - return conn, err -} - -func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { - ctx := req.Context() - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - // We aren't using the configured RoundTripper here so manually inject the trace context - tp := cli.tp - if tp == nil { - if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { - tp = span.TracerProvider() - } else { - tp = otel.GetTracerProvider() - } - } - - ctx, span := tp.Tracer("").Start(ctx, req.Method+" "+req.URL.Path) - span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(req)...) - defer func() { - if retErr != nil { - span.RecordError(retErr) - span.SetStatus(codes.Error, retErr.Error()) - } - span.End() - }() - otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header)) - - dialer := cli.Dialer() - conn, err := dialer(ctx) - if err != nil { - return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - _ = tcpConn.SetKeepAlive(true) - _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - if resp != nil { - span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(resp.StatusCode)) - } - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - _ = resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - } - - c, br := clientconn.Hijack() - if br.Buffered() > 0 { - // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite if the underlying connection - // implements it. - if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} - } else { - c = &hijackedConn{c, br} - } - } else { - br.Reset(nil) - } - - var mediaType string - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = resp.Header.Get("Content-Type") - } - - return c, mediaType, nil -} - -// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case -// that a) there was already buffered data in the http layer when Hijack() was -// called, and b) the underlying net.Conn does *not* implement CloseWrite(). -// hijackedConn does not implement CloseWrite() either. 
-type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -// hijackedConnCloseWriter is a hijackedConn which additionally implements -// CloseWrite(). It is returned by setupHijackConn in the case that a) there -// was already buffered data in the http layer when Hijack() was called, and b) -// the underlying net.Conn *does* implement CloseWrite(). -type hijackedConnCloseWriter struct { - *hijackedConn -} - -var _ types.CloseWriter = &hijackedConnCloseWriter{} - -func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) - return conn.CloseWrite() -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index d1f32ac904..0000000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,144 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it. -func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header{} - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: getDockerOS(serverResp.header.Get("Server")), - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - 
query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return query, err - } - query.Set("platform", strings.ToLower(options.Platform)) - } - if options.BuildID != "" { - query.Set("buildid", options.BuildID) - } - query.Set("version", string(options.Version)) - - if options.Outputs != nil { - outputsJSON, err := json.Marshal(options.Outputs) - if err != nil { - return query, err - } - query.Set("outputs", string(outputsJSON)) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index 29cd0b4373..0000000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/images/create", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index b5bea10d8f..0000000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index cd376a14e5..0000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - // Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 1de10e5a08..0000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index 950d513334..0000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. 
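For context, ImageInspectWithRaw returns both the decoded struct and the raw JSON bytes, which is useful when the exact daemon response must be preserved. A small sketch (the image reference is supplied by the caller):

    import (
        "context"
        "fmt"
        "os"

        "github.com/docker/docker/client"
    )

    // showImage prints an image's ID followed by the raw inspect JSON
    // exactly as the daemon returned it.
    func showImage(ctx context.Context, cli *client.Client, ref string) error {
        img, raw, err := cli.ImageInspectWithRaw(ctx, ref)
        if err != nil {
            return err
        }
        fmt.Println(img.ID)
        _, err = os.Stdout.Write(raw)
        return err
    }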
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("shared-size", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index c825206ea5..0000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ - "Content-Type": {"application/x-tar"}, - }) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 56af6d7f98..0000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport - - if err := cli.NewVersionError("1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted 
file mode 100644 index d92049d588..0000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/errdefs" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index 6839a89e07..0000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "errors" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
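Since ImagePull hands back a progress stream rather than waiting for the pull to finish, callers normally drain the reader before using the image. A minimal sketch of that pattern:

    import (
        "context"
        "io"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    // pullImage drains the JSON progress stream so the pull runs to
    // completion, then closes the body as the doc comment above requires.
    func pullImage(ctx context.Context, cli *client.Client, ref string) error {
        rc, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})
        if err != nil {
            return err
        }
        defer rc.Close()
        _, err = io.Copy(io.Discard, rc)
        return err
    }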
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - name := reference.FamiliarName(ref) - query := url.Values{} - if !options.All { - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - } - - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go deleted file mode 100644 index 6a9fb3f41f..0000000000 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - var dels []types.ImageDeleteResponseItem - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return dels, err - } - - err = json.NewDecoder(resp.body).Decode(&dels) - return dels, err -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go deleted file mode 100644 index d1314e4b22..0000000000 --- a/vendor/github.com/docker/docker/client/image_save.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go deleted file mode 100644 index 8971b139ae..0000000000 --- a/vendor/github.com/docker/docker/client/image_search.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// ImageSearch makes the docker host search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - defer ensureReaderClosed(resp) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.get(ctx, "/images/search", query, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index ea6b4a1e65..0000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/distribution/reference" - "github.com/pkg/errors" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index cc3fcc4670..0000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types/system" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (system.Info, error) { - var info system.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return info, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go deleted file mode 100644 index b2e5d36486..0000000000 --- a/vendor/github.com/docker/docker/client/interface.go +++ /dev/null @@ -1,202 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/system" - "github.com/docker/docker/api/types/volume" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
-type CommonAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - HTTPClient() *http.Client - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) - Dialer() func(context.Context) (net.Conn, error) - Close() error -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig 
container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) - BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, 
error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (system.Info, error) - RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type 
VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) - VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) - VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go deleted file mode 100644 index c585c10459..0000000000 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/checkpoint" -) - -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error - CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error - CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) -} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd7426..0000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. 
-var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index 19e985e0b9..0000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - defer ensureReaderClosed(resp) - - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index 5718946134..0000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index 278d9383a8..0000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index dd15676656..0000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. 
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index 0f90e2bb90..0000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { - if networkID == "" { - return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} - } - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResource, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index ed2acb5571..0000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworkList returns the list of networks configured in the docker host. 
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index cebb188219..0000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport - - if err := cli.NewVersionError("1.25", "network prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index 9d6c6cef07..0000000000 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 95ab9b1be0..0000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeInspectWithRaw returns the node information. 
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} - } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Node{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index 1a9e6bfb1b..0000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go deleted file mode 100644 index e44436debc..0000000000 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index 0d0fc3b788..0000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeUpdate updates a Node. 
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go deleted file mode 100644 index ddb0ca3991..0000000000 --- a/vendor/github.com/docker/docker/client/options.go +++ /dev/null @@ -1,233 +0,0 @@ -package client - -import ( - "context" - "net" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "go.opentelemetry.io/otel/trace" -) - -// Opt is a configuration option to initialize a [Client]. -type Opt func(*Client) error - -// FromEnv configures the client with values from environment variables. It -// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], -// and [WithVersionFromEnv] options. -// -// FromEnv uses the following environment variables: -// -// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. -// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the -// API to use, leave empty for latest. -// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from -// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). -// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification -// (off by default). -func FromEnv(c *Client) error { - ops := []Opt{ - WithTLSClientConfigFromEnv(), - WithHostFromEnv(), - WithVersionFromEnv(), - } - for _, op := range ops { - if err := op(c); err != nil { - return err - } - } - return nil -} - -// WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. It returns -// an error if the client does not have a [http.Transport] configured. -func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { - return func(c *Client) error { - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialContext - return nil - } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) - } -} - -// WithHost overrides the client host with the specified one. -func WithHost(host string) Opt { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) - } -} - -// WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, -// or set to an empty value, the host is not modified. -func WithHostFromEnv() Opt { - return func(c *Client) error { - if host := os.Getenv(EnvOverrideHost); host != "" { - return WithHost(host)(c) - } - return nil - } -} - -// WithHTTPClient overrides the client's HTTP client with the specified one. 
-func WithHTTPClient(client *http.Client) Opt { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithTimeout configures the time limit for requests made by the HTTP client. -func WithTimeout(timeout time.Duration) Opt { - return func(c *Client) error { - c.client.Timeout = timeout - return nil - } -} - -// WithUserAgent configures the User-Agent header to use for HTTP requests. -// It overrides any User-Agent set in headers. When set to an empty string, -// the User-Agent header is removed, and no header is sent. -func WithUserAgent(ua string) Opt { - return func(c *Client) error { - c.userAgent = &ua - return nil - } -} - -// WithHTTPHeaders appends custom HTTP headers to the client's default headers. -// It does not allow for built-in headers (such as "User-Agent", if set) to -// be overridden. Also see [WithUserAgent]. -func WithHTTPHeaders(headers map[string]string) Opt { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - -// WithScheme overrides the client scheme with the specified one. -func WithScheme(scheme string) Opt { - return func(c *Client) error { - c.scheme = scheme - return nil - } -} - -// WithTLSClientConfig applies a TLS config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { - return func(c *Client) error { - transport, ok := c.client.Transport.(*http.Transport) - if !ok { - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) - } - config, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: cacertPath, - CertFile: certPath, - KeyFile: keyPath, - ExclusiveRootPools: true, - }) - if err != nil { - return errors.Wrap(err, "failed to create tls config") - } - transport.TLSClientConfig = config - return nil - } -} - -// WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY -// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, -// TLS configuration is not modified. -// -// WithTLSClientConfigFromEnv uses the following environment variables: -// -// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from -// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). -// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification -// (off by default). -func WithTLSClientConfigFromEnv() Opt { - return func(c *Client) error { - dockerCertPath := os.Getenv(EnvOverrideCertPath) - if dockerCertPath == "" { - return nil - } - tlsc, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - }) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - return nil - } -} - -// WithVersion overrides the client version with the specified one. If an empty -// version is provided, the value is ignored to allow version negotiation -// (see [WithAPIVersionNegotiation]). 
-func WithVersion(version string) Opt { - return func(c *Client) error { - if version != "" { - c.version = version - c.manualOverride = true - } - return nil - } -} - -// WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. -// If DOCKER_API_VERSION is not set, or set to an empty value, the version -// is not modified. -func WithVersionFromEnv() Opt { - return func(c *Client) error { - return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) - } -} - -// WithAPIVersionNegotiation enables automatic API version negotiation for the client. -// With this option enabled, the client automatically negotiates the API version -// to use when making requests. API version negotiation is performed on the first -// request; subsequent requests do not re-negotiate. -func WithAPIVersionNegotiation() Opt { - return func(c *Client) error { - c.negotiateVersion = true - return nil - } -} - -// WithTraceProvider sets the trace provider for the client. -// If this is not set then the global trace provider will be used. -func WithTraceProvider(provider trace.TracerProvider) Opt { - return func(c *Client) error { - c.tp = provider - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index dfd1042fab..0000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,73 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "path" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/errdefs" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", -// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use -// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported -// by the daemon. -func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - - // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() - // because ping requests are used during API version negotiation, so we want - // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err := cli.doRequest(req) - if err == nil { - defer ensureReaderClosed(serverResp) - switch serverResp.statusCode { - case http.StatusOK, http.StatusInternalServerError: - // Server handled the request, so parse the response - return parsePingResponse(cli, serverResp) - } - } else if IsErrConnectionFailed(err) { - return ping, err - } - - // HEAD failed; fallback to GET. 
- req.Method = http.MethodGet - serverResp, err = cli.doRequest(req) - defer ensureReaderClosed(serverResp) - if err != nil { - return ping, err - } - return parsePingResponse(cli, serverResp) -} - -func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { - var ping types.Ping - if resp.header == nil { - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) - } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") - if resp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - if bv := resp.header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = types.BuilderVersion(bv) - } - if si := resp.header.Get("Swarm"); si != "" { - state, role, _ := strings.Cut(si, "/") - ping.SwarmStatus = &swarm.Status{ - NodeState: swarm.LocalNodeState(state), - ControlAvailable: role == "manager", - } - } - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index b95dbaf686..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 01f6574f95..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 736da48bd1..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go 
b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index f09e460660..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} - } - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index 69184619a2..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,117 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) - if err != nil { - pw.CloseWithError(err) - return - } - defer func() { - if err != nil { - delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) - return - } - } - - if options.Disabled { - pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.get(ctx, "/plugins/privileges", query, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) 
checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, errors.Errorf("permission denied while installing plugin %s", options.RemoteRef) - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index 2091a054d6..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return plugins, err - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - return plugins, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index 8f68a86eee..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - - "github.com/docker/docker/api/types/registry" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index 4cd66958c3..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginRemove 
removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index dcf5752ca2..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 0c26a9fb8a..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,42 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" -) - -// PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go deleted file mode 100644 index efe07bb9ea..0000000000 --- a/vendor/github.com/docker/docker/client/request.go +++ /dev/null @@ -1,282 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "reflect" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - -// head sends an http request to the docker API using the method HEAD. 
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) -} - -// get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) -} - -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.putRaw(ctx, path, query, body, headers) -} - -// putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - // PUT requests are expected to always have a body (apparently) - // so explicitly pass an empty body to sendRequest to signal that - // it should set the Content-Type header if not already present. - if body == nil { - body = http.NoBody - } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) -} - -func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { - if obj == nil { - return nil, headers, nil - } - // encoding/json encodes a nil pointer as the JSON document `null`, - // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. - // That is almost certainly not what the caller intended as the request body. - if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { - return nil, headers, nil - } - - body, err := encodeData(obj) - if err != nil { - return nil, headers, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil -} - -func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { - req, err := http.NewRequestWithContext(ctx, method, path, body) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, headers) - req.URL.Scheme = cli.scheme - req.URL.Host = cli.addr - - if cli.proto == "unix" || cli.proto == "npipe" { - // Override host header for non-tcp connections. 
- req.Host = DummyHost - } - - if body != nil && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - return req, nil -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) - if err != nil { - return serverResponse{}, err - } - - resp, err := cli.doRequest(req) - switch { - case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) - case err == nil: - err = cli.checkResponseErr(resp) - } - return resp, errdefs.FromStatusCode(err, resp.statusCode) -} - -func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - - resp, err := cli.client.Do(req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") - } - - // Don't decorate context sentinel errors; users may be comparing to - // them directly. - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return serverResp, err - } - - if uErr, ok := err.(*url.Error); ok { - if nErr, ok := uErr.Err.(*net.OpError); ok { - if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) - } - } - } - - if nErr, ok := err.(net.Error); ok { - if nErr.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) - } - if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) - } - } - - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. 
- // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { - err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") - } else { - f.Close() - err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") - } - } - - return serverResp, errors.Wrap(err, "error during connect") - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil -} - -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { - return nil - } - - var body []byte - var err error - if serverResp.body != nil { - bodyMax := 1 * 1024 * 1024 // 1 MiB - bodyR := &io.LimitedReader{ - R: serverResp.body, - N: int64(bodyMax), - } - body, err = io.ReadAll(bodyR) - if err != nil { - return err - } - if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) - } - } - if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) - } - - var daemonErr error - if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return errors.Wrap(err, "Error reading JSON") - } - daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) - } else { - daemonErr = errors.New(strings.TrimSpace(string(body))) - } - return errors.Wrap(daemonErr, "Error response from daemon") -} - -func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { - continue - } - req.Header.Set(k, v) - } - - for k, v := range headers { - req.Header[http.CanonicalHeaderKey(k)] = v - } - - if cli.userAgent != nil { - if *cli.userAgent == "" { - req.Header.Del("User-Agent") - } else { - req.Header.Set("User-Agent", *cli.userAgent) - } - } - return req -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response serverResponse) { - if response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(io.Discard, response.body, 512) - response.body.Close() - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 
100644 index c65d38a191..0000000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index 5906874b15..0000000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Secret{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index a0289c9f44..0000000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// SecretList returns the list of secrets. 
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index f47f68b6e0..0000000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// SecretRemove removes a secret. -func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 2e939e8ced..0000000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretUpdate attempts to update a secret. -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go deleted file mode 100644 index f39e63e3a9..0000000000 --- a/vendor/github.com/docker/docker/client/service_create.go +++ /dev/null @@ -1,184 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// ServiceCreate creates a new service. 
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var response types.ServiceCreateResponse - - // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} - } - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} - -func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { - warning = digestWarning(taskSpec.ContainerSpec.Image) - } else { - taskSpec.ContainerSpec.Image = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { - warning = digestWarning(taskSpec.PluginSpec.Remote) - } else { - taskSpec.PluginSpec.Remote = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - var platforms []swarm.Platform - if err != nil { - return "", nil, err - } - - imageWithDigest := 
imageWithDigestString(image, distributionInspect.Descriptor.Digest) - - if len(distributionInspect.Platforms) > 0 { - platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) - for _, p := range distributionInspect.Platforms { - // clear architecture field for arm. This is a temporary patch to address - // https://github.com/docker/swarmkit/issues/2294. The issue is that while - // image manifests report "arm" as the architecture, the node reports - // something like "armv7l" (includes the variant), which causes arm images - // to stop working with swarm mode. This patch removes the architecture - // constraint for arm images to ensure tasks get scheduled. - arch := p.Architecture - if strings.ToLower(arch) == "arm" { - arch = "" - } - platforms = append(platforms, swarm.Platform{ - Architecture: arch, - OS: p.OS, - }) - } - } - return imageWithDigest, platforms, err -} - -// imageWithDigestString takes an image string and a digest, and updates -// the image string if it didn't originally contain a digest. It returns -// image unmodified in other situations. -func imageWithDigestString(image string, dgst digest.Digest) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { - // ensure that image gets a default tag if none is provided - img, err := reference.WithDigest(namedRef, dgst) - if err == nil { - return reference.FamiliarString(img) - } - } - } - return image -} - -// imageWithTagString takes an image string, and returns a tagged image -// string, adding a 'latest' tag if one was not provided. It returns an -// empty string if a canonical reference was provided -func imageWithTagString(image string) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - return reference.FamiliarString(reference.TagNameOnly(namedRef)) - } - return "" -} - -// digestWarning constructs a formatted warning string using the -// image name that could not be pinned by digest. The formatting -// is hardcoded, but could me made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} - -func validateServiceSpec(s swarm.ServiceSpec) error { - if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { - return errors.New("must not specify both a container spec and a plugin spec in the task template") - } - if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { - return errors.New("mismatched runtime with plugin spec") - } - if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { - return errors.New("mismatched runtime with container spec") - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index cee020c98b..0000000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceInspectWithRaw returns the service information and the raw data. -func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} - } - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Service{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index f97ec75a5c..0000000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceList returns the list of services. 
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - if options.Status { - query.Set("status", "true") - } - - resp, err := cli.get(ctx, "/services", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index 906fd4059e..0000000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. -func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 2c46326ebc..0000000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index a27cea0d96..0000000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,79 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" -) - -// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. -// It should be the value as set *before* the update. You can find this value in the Meta field -// of swarm.Service, which can be found using ServiceInspectWithRaw. 
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { - var ( - query = url.Values{} - response = types.ServiceUpdateResponse{} - ) - - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", version.String()) - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index 19f59dd582..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.SwarmUnlockKeyResponse{}, err - } - - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index da3c1637ef..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInit initializes the swarm. 
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index b52b67a884..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index a1cf0455d2..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index 90ca84b363..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// SwarmLeave leaves the swarm. -func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index d2412f7d44..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index 9fde7d75ee..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUpdate updates the swarm. 
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", version.String()) - query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) - query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index dde1f6c59d..0000000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// TaskInspectWithRaw returns the task information and its raw representation. -func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} - } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Task{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index 4869b44493..0000000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index 6222fab577..0000000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go deleted file mode 100644 index 7f3ff44eb8..0000000000 --- a/vendor/github.com/docker/docker/client/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "regexp" - - "github.com/docker/docker/api/types/filters" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. -func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 8f17ff4e87..0000000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index b3b182437b..0000000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - var vol volume.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, err - } - err = json.NewDecoder(resp.body).Decode(&vol) - return vol, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index b3ba4e6046..0000000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { - vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return vol, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - if volumeID == "" { - return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} - } - - var vol volume.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return vol, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&vol) - return vol, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index d5ea9827c7..0000000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumeList returns the volumes configured in the docker host. 
-func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { - var volumes volume.ListResponse - query := url.Values{} - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 6e324708f2..0000000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport - - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index 1f26438360..0000000000 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/versions" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go deleted file mode 100644 index 33bd31e531..0000000000 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" -) - -// VolumeUpdate updates a volume. This only works for Cluster Volumes, and -// only some fields can be updated. 
-func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - if err := cli.NewVersionError("1.42", "volume update"); err != nil { - return err - } - - query := url.Values{} - query.Set("version", version.String()) - - resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go deleted file mode 100644 index 1672617635..0000000000 --- a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1 - -import ( - "time" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" - -// DockerOCIImage is a ocispec.Image extended with Docker specific Config. -type DockerOCIImage struct { - ocispec.Image - - // Shadow ocispec.Image.Config - Config DockerOCIImageConfig `json:"config,omitempty"` -} - -// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. -type DockerOCIImageConfig struct { - ocispec.ImageConfig - - DockerOCIImageConfigExt -} - -// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. -type DockerOCIImageConfigExt struct { - Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - - OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile - Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. -type HealthcheckConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go deleted file mode 100644 index cf4d6a5957..0000000000 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ /dev/null @@ -1,46 +0,0 @@ -package multierror - -import ( - "strings" -) - -// Join is a drop-in replacement for errors.Join with better formatting. 
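To illustrate what "better formatting" means for the Join helper defined just below: with more than one non-nil error, Error() renders a bulleted list instead of newline-joined messages. The package is internal to docker/docker, so this is only a sketch of the observable behaviour, not a supported import path:

```go
// Assumes "errors", "fmt", and the internal multierror package are imported.
err := multierror.Join(
	errors.New("first failure"),
	errors.New("second failure"),
)
fmt.Println(err)
// Output:
// * first failure
// * second failure
```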
-func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - if len(e.errs) == 1 { - return strings.TrimSpace(e.errs[0].Error()) - } - stringErrs := make([]string, 0, len(e.errs)) - for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) - } - return "* " + strings.Join(stringErrs, "\n* ") -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index 4049d780c5..0000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,240 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerPort := parts[n-1] - - switch n { - case 1: - return "", "", containerPort - case 2: - return "", parts[0], containerPort - case 3: - return parts[0], parts[1], containerPort - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - ip, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - if ip != "" && ip[0] == '[' { - // Strip [] from IPV6 addresses - rawIP, _, err := net.SplitHostPort(ip + ":") - if err != nil { - return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) - } - ip = rawIP - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("invalid IP address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("no port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index e4b53e8a32..0000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,33 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("empty string specified for ports") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index b6eed145e1..0000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type 
portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Less sorts the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. -func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok && len(binding) > 0 { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784e..0000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. 
The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc31..0000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c7411..0000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e134..0000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d605529..0000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d4..0000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
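As a quick illustration of the thresholds in the HumanDuration implementation that follows, a few sample inputs and the strings they produce (assuming the package is imported as `units` together with "fmt" and "time"):

```go
fmt.Println(units.HumanDuration(47 * time.Second)) // "47 seconds"
fmt.Println(units.HumanDuration(90 * time.Minute)) // "2 hours"
fmt.Println(units.HumanDuration(72 * time.Hour))   // "3 days"
```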
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index c245a89513..0000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,154 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[byte]int64 - -var ( - decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} - binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} -) - -var ( - decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -) - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
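The two parsers documented above differ only in their multiplier tables: FromHumanSize uses decimal (1000-based) units, while RAMInBytes, defined just below, uses binary (1024-based) units and also accepts a bare or lowercase suffix. A small sketch, assuming the package is imported as `units`:

```go
n, _ := units.FromHumanSize("2GB") // 2000000000 (decimal multipliers)
m, _ := units.RAMInBytes("2GiB")   // 2147483648 (binary multipliers)
k, _ := units.RAMInBytes("512m")   // 536870912 ('b' suffix optional, case-insensitive)
fmt.Println(n, m, k)
```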
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - // TODO: rewrite to use strings.Cut if there's a space - // once Go < 1.18 is deprecated. - sep := strings.LastIndexAny(sizeStr, "01234567890. ") - if sep == -1 { - // There should be at least a digit. - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - var num, sfx string - if sizeStr[sep] != ' ' { - num = sizeStr[:sep+1] - sfx = sizeStr[sep+1:] - } else { - // Omit the space separator. - num = sizeStr[:sep] - sfx = sizeStr[sep+1:] - } - - size, err := strconv.ParseFloat(num, 64) - if err != nil { - return -1, err - } - // Backward compatibility: reject negative sizes. - if size < 0 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - if len(sfx) == 0 { - return int64(size), nil - } - - // Process the suffix. - - if len(sfx) > 3 { // Too long. - goto badSuffix - } - sfx = strings.ToLower(sfx) - // Trivial case: b suffix. - if sfx[0] == 'b' { - if len(sfx) > 1 { // no extra characters allowed after b. - goto badSuffix - } - return int64(size), nil - } - // A suffix from the map. - if mul, ok := uMap[sfx[0]]; ok { - size *= float64(mul) - } else { - goto badSuffix - } - - // The suffix may have extra "b" or "ib" (e.g. KiB or MB). - switch { - case len(sfx) == 2 && sfx[1] != 'b': - goto badSuffix - case len(sfx) == 3 && sfx[1:] != "ib": - goto badSuffix - } - - return int64(size), nil - -badSuffix: - return -1, fmt.Errorf("invalid suffix: '%s'", sfx) -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc8..0000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
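ParseUlimit, defined just below, accepts the `name=soft[:hard]` format; when the hard value is omitted it defaults to the soft value. A short sketch, assuming the package is imported as `units`:

```go
u, err := units.ParseUlimit("nofile=1024:2048")
if err != nil {
	return err
}
fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048

u, _ = units.ParseUlimit("nproc=512") // hard defaults to the soft value
fmt.Println(u.Soft, u.Hard)           // 512 512
```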
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml deleted file mode 100644 index bfc421200d..0000000000 --- a/vendor/github.com/felixge/httpsnoop/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt deleted file mode 100644 index e028b46a9b..0000000000 --- a/vendor/github.com/felixge/httpsnoop/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile deleted file mode 100644 index 2d84889aed..0000000000 --- a/vendor/github.com/felixge/httpsnoop/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: ci generate clean - -ci: clean generate - go test -v ./... - -generate: - go generate . - -clean: - rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md deleted file mode 100644 index ddcecd13e7..0000000000 --- a/vendor/github.com/felixge/httpsnoop/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# httpsnoop - -Package httpsnoop provides an easy way to capture http related metrics (i.e. -response time, bytes written, and http status code) from your application's -http.Handlers. - -Doing this requires non-trivial wrapping of the http.ResponseWriter interface, -which is also exposed for users interested in a more low-level API. - -[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) -[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) - -## Usage Example - -```go -// myH is your app's http handler, perhaps a http.ServeMux or similar. -var myH http.Handler -// wrappedH wraps myH in order to log every request. -wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m := httpsnoop.CaptureMetrics(myH, w, r) - log.Printf( - "%s %s (code=%d dt=%s written=%d)", - r.Method, - r.URL, - m.Code, - m.Duration, - m.Written, - ) -}) -http.ListenAndServe(":8080", wrappedH) -``` - -## Why this package exists - -Instrumenting an application's http.Handler is surprisingly difficult. - -However if you google for e.g. "capture ResponseWriter status code" you'll find -lots of advise and code examples that suggest it to be a fairly trivial -undertaking. Unfortunately everything I've seen so far has a high chance of -breaking your application. - -The main problem is that a `http.ResponseWriter` often implements additional -interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and -`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` -in your own struct that also implements the `http.ResponseWriter` interface -will hide the additional interfaces mentioned above. This has a high change of -introducing subtle bugs into any non-trivial application. - -Another approach I've seen people take is to return a struct that implements -all of the interfaces above. However, that's also problematic, because it's -difficult to fake some of these interfaces behaviors when the underlying -`http.ResponseWriter` doesn't have an implementation. It's also dangerous, -because an application may choose to operate differently, merely because it -detects the presence of these additional interfaces. - -This package solves this problem by checking which additional interfaces a -`http.ResponseWriter` implements, returning a wrapped version implementing the -exact same set of interfaces. - -Additionally this package properly handles edge cases such as `WriteHeader` not -being called, or called more than once, as well as concurrent calls to -`http.ResponseWriter` methods, and even calls happening after the wrapped -`ServeHTTP` has already returned. - -Unfortunately this package is not perfect either. 
It's possible that it is -still missing some interfaces provided by the go core (let me know if you find -one), and it won't work for applications adding their own interfaces into the -mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying -`http.ResponseWriter` and type-assert the result to its other interfaces. - -However, hopefully the explanation above has sufficiently scared you of rolling -your own solution to this problem. httpsnoop may still break your application, -but at least it tries to avoid it as much as possible. - -Anyway, the real problem here is that smuggling additional interfaces inside -`http.ResponseWriter` is a problematic design choice, but it probably goes as -deep as the Go language specification itself. But that's okay, I still prefer -Go over the alternatives ;). - -## Performance - -``` -BenchmarkBaseline-8 20000 94912 ns/op -BenchmarkCaptureMetrics-8 20000 95461 ns/op -``` - -As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an -overhead of ~500 ns per http request on my machine. However, the margin of -error appears to be larger than that, therefor it should be reasonable to -assume that the overhead introduced by `CaptureMetrics` is absolutely -negligible. - -## License - -MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go deleted file mode 100644 index b77cc7c009..0000000000 --- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go +++ /dev/null @@ -1,86 +0,0 @@ -package httpsnoop - -import ( - "io" - "net/http" - "time" -) - -// Metrics holds metrics captured from CaptureMetrics. -type Metrics struct { - // Code is the first http response code passed to the WriteHeader func of - // the ResponseWriter. If no such call is made, a default code of 200 is - // assumed instead. - Code int - // Duration is the time it took to execute the handler. - Duration time.Duration - // Written is the number of bytes successfully written by the Write or - // ReadFrom function of the ResponseWriter. ResponseWriters may also write - // data to their underlaying connection directly (e.g. headers), but those - // are not tracked. Therefor the number of Written bytes will usually match - // the size of the response body. - Written int64 -} - -// CaptureMetrics wraps the given hnd, executes it with the given w and r, and -// returns the metrics it captured from it. -func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { - return CaptureMetricsFn(w, func(ww http.ResponseWriter) { - hnd.ServeHTTP(ww, r) - }) -} - -// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the -// resulting metrics. This is very similar to CaptureMetrics (which is just -// sugar on top of this func), but is a more usable interface if your -// application doesn't use the Go http.Handler interface. -func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { - m := Metrics{Code: http.StatusOK} - m.CaptureMetrics(w, fn) - return m -} - -// CaptureMetrics wraps w and calls fn with the wrapped w and updates -// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, -// but allows one to customize starting Metrics object. 
-func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { - var ( - start = time.Now() - headerWritten bool - hooks = Hooks{ - WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { - return func(code int) { - next(code) - - if !headerWritten { - m.Code = code - headerWritten = true - } - } - }, - - Write: func(next WriteFunc) WriteFunc { - return func(p []byte) (int, error) { - n, err := next(p) - - m.Written += int64(n) - headerWritten = true - return n, err - } - }, - - ReadFrom: func(next ReadFromFunc) ReadFromFunc { - return func(src io.Reader) (int64, error) { - n, err := next(src) - - headerWritten = true - m.Written += n - return n, err - } - }, - } - ) - - fn(Wrap(w, hooks)) - m.Duration += time.Since(start) -} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go deleted file mode 100644 index 203c35b3c6..0000000000 --- a/vendor/github.com/felixge/httpsnoop/docs.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package httpsnoop provides an easy way to capture http related metrics (i.e. -// response time, bytes written, and http status code) from your application's -// http.Handlers. -// -// Doing this requires non-trivial wrapping of the http.ResponseWriter -// interface, which is also exposed for users interested in a more low-level -// API. -package httpsnoop - -//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go deleted file mode 100644 index 31cbdfb8ef..0000000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go +++ /dev/null @@ -1,436 +0,0 @@ -// +build go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// PushFunc is part of the http.Pusher interface. -type PushFunc func(target string, opts *http.PushOptions) error - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc - Push func(PushFunc) PushFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. 
Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// - http.Pusher -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. -func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - _, i4 := w.(http.Pusher) - switch { - // combination 1/32 - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - }{rw, rw} - // combination 2/32 - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Pusher - }{rw, rw, rw} - // combination 3/32 - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - }{rw, rw, rw} - // combination 4/32 - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw} - // combination 5/32 - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - }{rw, rw, rw} - // combination 6/32 - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - http.Pusher - }{rw, rw, rw, rw} - // combination 7/32 - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 8/32 - case !i0 && !i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 9/32 - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - }{rw, rw, rw} - // combination 10/32 - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{rw, rw, rw, rw} - // combination 11/32 - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 12/32 - case !i0 && i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 13/32 - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 14/32 - case !i0 && i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 15/32 - case !i0 && i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 16/32 - case !i0 && i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 17/32 - case i0 && !i1 && !i2 && !i3 && !i4: - 
return struct { - Unwrapper - http.ResponseWriter - http.Flusher - }{rw, rw, rw} - // combination 18/32 - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Pusher - }{rw, rw, rw, rw} - // combination 19/32 - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 20/32 - case i0 && !i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 21/32 - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw, rw} - // combination 22/32 - case i0 && !i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 23/32 - case i0 && !i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 24/32 - case i0 && !i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 25/32 - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw, rw} - // combination 26/32 - case i0 && i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 27/32 - case i0 && i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 28/32 - case i0 && i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 29/32 - case i0 && i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw, rw} - // combination 30/32 - case i0 && i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 31/32 - case i0 && i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw, rw} - // combination 32/32 - case i0 && i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Unwrap() http.ResponseWriter { - return w.w -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if 
w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} - -func (w *rw) Push(target string, opts *http.PushOptions) error { - f := w.w.(http.Pusher).Push - if w.h.Push != nil { - f = w.h.Push(f) - } - return f(target, opts) -} - -type Unwrapper interface { - Unwrap() http.ResponseWriter -} - -// Unwrap returns the underlying http.ResponseWriter from within zero or more -// layers of httpsnoop wrappers. -func Unwrap(w http.ResponseWriter) http.ResponseWriter { - if rw, ok := w.(Unwrapper); ok { - // recurse until rw.Unwrap() returns a non-Unwrapper - return Unwrap(rw.Unwrap()) - } else { - return w - } -} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go deleted file mode 100644 index ab99c07c7a..0000000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go +++ /dev/null @@ -1,278 +0,0 @@ -// +build !go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. 
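The Wrap/Hooks API deleted in these hunks treats each hook as middleware around a single http.ResponseWriter method, as the Wrap documentation above describes. A minimal sketch of a custom Write hook, assuming the upstream github.com/felixge/httpsnoop API; the handler and byte counter are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	var upstream http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello\n"))
	})

	counted := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var written int64
		hooks := httpsnoop.Hooks{
			// Intercept Write to count the bytes the handler emits; methods
			// without a hook (Flush, Hijack, ...) pass through untouched.
			Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {
				return func(p []byte) (int, error) {
					n, err := next(p)
					written += int64(n)
					return n, err
				}
			},
		}
		upstream.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
		log.Printf("%s %s wrote %d bytes", r.Method, r.URL.Path, written)
	})

	log.Fatal(http.ListenAndServe(":8080", counted))
}
```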
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - switch { - // combination 1/16 - case !i0 && !i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - }{rw, rw} - // combination 2/16 - case !i0 && !i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - }{rw, rw, rw} - // combination 3/16 - case !i0 && !i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - }{rw, rw, rw} - // combination 4/16 - case !i0 && !i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 5/16 - case !i0 && i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - }{rw, rw, rw} - // combination 6/16 - case !i0 && i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 7/16 - case !i0 && i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 8/16 - case !i0 && i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 9/16 - case i0 && !i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - }{rw, rw, rw} - // combination 10/16 - case i0 && !i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 11/16 - case i0 && !i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw, rw} - // combination 12/16 - case i0 && !i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 13/16 - case i0 && i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw, rw} - // combination 14/16 - case i0 && i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 15/16 - case i0 && i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw, rw} - // combination 16/16 - case i0 && i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Unwrap() http.ResponseWriter { - return w.w -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := 
w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} - -type Unwrapper interface { - Unwrap() http.ResponseWriter -} - -// Unwrap returns the underlying http.ResponseWriter from within zero or more -// layers of httpsnoop wrappers. -func Unwrap(w http.ResponseWriter) http.ResponseWriter { - if rw, ok := w.(Unwrapper); ok { - // recurse until rw.Unwrap() returns a non-Unwrapper - return Unwrap(rw.Unwrap()) - } else { - return w - } -} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go deleted file mode 100644 index e52f0cd01e..0000000000 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ /dev/null @@ -1,804 +0,0 @@ -/* -Copyright 2021 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package funcr implements formatting of structured log messages and -// optionally captures the call site and timestamp. -// -// The simplest way to use it is via its implementation of a -// github.com/go-logr/logr.LogSink with output through an arbitrary -// "write" function. See New and NewJSON for details. -// -// # Custom LogSinks -// -// For users who need more control, a funcr.Formatter can be embedded inside -// your own custom LogSink implementation. This is useful when the LogSink -// needs to implement additional methods, for example. -// -// # Formatting -// -// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for -// values which are being logged. When rendering a struct, funcr will use Go's -// standard JSON tags (all except "string"). -package funcr - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-logr/logr" -) - -// New returns a logr.Logger which is implemented by an arbitrary function. -func New(fn func(prefix, args string), opts Options) logr.Logger { - return logr.New(newSink(fn, NewFormatter(opts))) -} - -// NewJSON returns a logr.Logger which is implemented by an arbitrary function -// and produces JSON output. -func NewJSON(fn func(obj string), opts Options) logr.Logger { - fnWrapper := func(_, obj string) { - fn(obj) - } - return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) -} - -// Underlier exposes access to the underlying logging function. Since -// callers only have a logr.Logger, they have to know which -// implementation is in use, so this interface is less of an -// abstraction and more of a way to test type conversion. 
-type Underlier interface { - GetUnderlying() func(prefix, args string) -} - -func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { - l := &fnlogger{ - Formatter: formatter, - write: fn, - } - // For skipping fnlogger.Info and fnlogger.Error. - l.Formatter.AddCallDepth(1) - return l -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // LogCaller tells funcr to add a "caller" key to some or all log lines. - // This has some overhead, so some users might not want it. - LogCaller MessageClass - - // LogCallerFunc tells funcr to also log the calling function name. This - // has no effect if caller logging is not enabled (see Options.LogCaller). - LogCallerFunc bool - - // LogTimestamp tells funcr to add a "ts" key to log lines. This has some - // overhead, so some users might not want it. - LogTimestamp bool - - // TimestampFormat tells funcr how to render timestamps when LogTimestamp - // is enabled. If not specified, a default format will be used. For more - // details, see docs for Go's time.Layout. - TimestampFormat string - - // Verbosity tells funcr which V logs to produce. Higher values enable - // more logs. Info logs at or below this level will be written, while logs - // above this level will be discarded. - Verbosity int - - // RenderBuiltinsHook allows users to mutate the list of key-value pairs - // while a log line is being rendered. The kvList argument follows logr - // conventions - each pair of slice elements is comprised of a string key - // and an arbitrary value (verified and sanitized before calling this - // hook). The value returned must follow the same conventions. This hook - // can be used to audit or modify logged data. For example, you might want - // to prefix all of funcr's built-in keys with some string. This hook is - // only called for built-in (provided by funcr itself) key-value pairs. - // Equivalent hooks are offered for key-value pairs saved via - // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and - // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} - - // RenderValuesHook is the same as RenderBuiltinsHook, except that it is - // only called for key-value pairs saved via logr.Logger.WithValues. See - // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} - - // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only - // called for key-value pairs passed directly to Info and Error. See - // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} - - // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct - // that contains a struct, etc.) it may log. Every time it finds a struct, - // slice, array, or map the depth is increased by one. When the maximum is - // reached, the value will be converted to a string indicating that the max - // depth has been exceeded. If this field is not specified, a default - // value will be used. - MaxLogDepth int -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. 
- Error -) - -// fnlogger inherits some of its LogSink implementation from Formatter -// and just needs to add some glue code. -type fnlogger struct { - Formatter - write func(prefix, args string) -} - -func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) GetUnderlying() func(prefix, args string) { - return l.write -} - -// Assert conformance to the interfaces. -var _ logr.LogSink = &fnlogger{} -var _ logr.CallDepthLogSink = &fnlogger{} -var _ Underlier = &fnlogger{} - -// NewFormatter constructs a Formatter which emits a JSON-like key=value format. -func NewFormatter(opts Options) Formatter { - return newFormatter(opts, outputKeyValue) -} - -// NewFormatterJSON constructs a Formatter which emits strict JSON. -func NewFormatterJSON(opts Options) Formatter { - return newFormatter(opts, outputJSON) -} - -// Defaults for Options. -const defaultTimestampFormat = "2006-01-02 15:04:05.000000" -const defaultMaxLogDepth = 16 - -func newFormatter(opts Options, outfmt outputFormat) Formatter { - if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFormat - } - if opts.MaxLogDepth == 0 { - opts.MaxLogDepth = defaultMaxLogDepth - } - f := Formatter{ - outputFormat: outfmt, - prefix: "", - values: nil, - depth: 0, - opts: &opts, - } - return f -} - -// Formatter is an opaque struct which can be embedded in a LogSink -// implementation. It should be constructed with NewFormatter. Some of -// its methods directly implement logr.LogSink. -type Formatter struct { - outputFormat outputFormat - prefix string - values []interface{} - valuesStr string - depth int - opts *Options -} - -// outputFormat indicates which outputFormat to use. -type outputFormat int - -const ( - // outputKeyValue emits a JSON-like key=value format, but not strict JSON. - outputKeyValue outputFormat = iota - // outputJSON emits strict JSON. - outputJSON -) - -// PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} - -// render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { - // Empirically bytes.Buffer is faster than strings.Builder for this. 
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) - if f.outputFormat == outputJSON { - buf.WriteByte('{') - } - vals := builtins - if hook := f.opts.RenderBuiltinsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape - continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { - if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } - } - continuing = true - buf.WriteString(f.valuesStr) - } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - if f.outputFormat == outputJSON { - buf.WriteByte('}') - } - return buf.String() -} - -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. -// -// This function returns a potentially modified version of kvList, which -// ensures that there is a value for every key (adding a value if needed) and -// that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { - // This logic overlaps with sanitize() but saves one type-cast per key, - // which can be measurable. - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - k = f.nonStringKey(kvList[i]) - kvList[i] = k - } - v := kvList[i+1] - - if i > 0 || continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - // In theory the format could be something we don't understand. In - // practice, we control it, so it won't be. - buf.WriteByte(' ') - } - } - - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } - buf.WriteString(f.pretty(v)) - } - return kvList -} - -func (f Formatter) pretty(value interface{}) string { - return f.prettyWithFlags(value, 0, 0) -} - -const ( - flagRawStruct = 0x1 // do not print braces on structs -) - -// TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { - if depth > f.opts.MaxLogDepth { - return `""` - } - - // Handle types that take full control of logging. - if v, ok := value.(logr.Marshaler); ok { - // Replace the value with what the type wants to get logged. - // That then gets handled below via reflection. - value = invokeMarshaler(v) - } - - // Handle types that want to format themselves. - switch v := value.(type) { - case fmt.Stringer: - value = invokeStringer(v) - case error: - value = invokeError(v) - } - - // Handling the most common types without reflect is a small perf win. 
- switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case string: - return prettyString(v) - case int: - return strconv.FormatInt(int64(v), 10) - case int8: - return strconv.FormatInt(int64(v), 10) - case int16: - return strconv.FormatInt(int64(v), 10) - case int32: - return strconv.FormatInt(int64(v), 10) - case int64: - return strconv.FormatInt(int64(v), 10) - case uint: - return strconv.FormatUint(uint64(v), 10) - case uint8: - return strconv.FormatUint(uint64(v), 10) - case uint16: - return strconv.FormatUint(uint64(v), 10) - case uint32: - return strconv.FormatUint(uint64(v), 10) - case uint64: - return strconv.FormatUint(v, 10) - case uintptr: - return strconv.FormatUint(uint64(v), 10) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case complex64: - return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` - case complex128: - return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` - case PseudoStruct: - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - v = f.sanitize(v) - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < len(v); i += 2 { - if i > 0 { - buf.WriteByte(',') - } - k, _ := v[i].(string) // sanitize() above means no need to check success - // arbitrary keys might need escaping - buf.WriteString(prettyString(k)) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - t := reflect.TypeOf(value) - if t == nil { - return "null" - } - v := reflect.ValueOf(value) - switch t.Kind() { - case reflect.Bool: - return strconv.FormatBool(v.Bool()) - case reflect.String: - return prettyString(v.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(int64(v.Int()), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(uint64(v.Uint()), 10) - case reflect.Float32: - return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) - case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64) - case reflect.Complex64: - return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` - case reflect.Complex128: - return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` - case reflect.Struct: - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - printComma := false // testing i>0 is not enough because of JSON omitted fields - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - if fld.PkgPath != "" { - // reflect says this field is only defined for non-exported fields. - continue - } - if !v.Field(i).CanInterface() { - // reflect isn't clear exactly what this means, but we can't use it. 
- continue - } - name := "" - omitempty := false - if tag, found := fld.Tag.Lookup("json"); found { - if tag == "-" { - continue - } - if comma := strings.Index(tag, ","); comma != -1 { - if n := tag[:comma]; n != "" { - name = n - } - rest := tag[comma:] - if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { - omitempty = true - } - } else { - name = tag - } - } - if omitempty && isEmpty(v.Field(i)) { - continue - } - if printComma { - buf.WriteByte(',') - } - printComma = true // if we got here, we are rendering a field - if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) - continue - } - if name == "" { - name = fld.Name - } - // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - case reflect.Slice, reflect.Array: - // If this is outputing as JSON make sure this isn't really a json.RawMessage. - // If so just emit "as-is" and don't pretty it as that will just print - // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. - if f.outputFormat == outputJSON { - if rm, ok := value.(json.RawMessage); ok { - // If it's empty make sure we emit an empty value as the array style would below. - if len(rm) > 0 { - buf.Write(rm) - } else { - buf.WriteString("null") - } - return buf.String() - } - } - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(',') - } - e := v.Index(i) - buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) - } - buf.WriteByte(']') - return buf.String() - case reflect.Map: - buf.WriteByte('{') - // This does not sort the map keys, for best perf. - it := v.MapRange() - i := 0 - for it.Next() { - if i > 0 { - buf.WriteByte(',') - } - // If a map key supports TextMarshaler, use it. - keystr := "" - if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { - txt, err := m.MarshalText() - if err != nil { - keystr = fmt.Sprintf("", err.Error()) - } else { - keystr = string(txt) - } - keystr = prettyString(keystr) - } else { - // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) - if t.Key().Kind() != reflect.String { - // JSON only does string keys. Unlike Go's standard JSON, we'll - // convert just about anything to a string. - keystr = prettyString(keystr) - } - } - buf.WriteString(keystr) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) - i++ - } - buf.WriteByte('}') - return buf.String() - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return "null" - } - return f.prettyWithFlags(v.Elem().Interface(), 0, depth) - } - return fmt.Sprintf(`""`, t.Kind().String()) -} - -func prettyString(s string) string { - // Avoid escaping (which does allocations) if we can. - if needsEscape(s) { - return strconv.Quote(s) - } - b := bytes.NewBuffer(make([]byte, 0, 1024)) - b.WriteByte('"') - b.WriteString(s) - b.WriteByte('"') - return b.String() -} - -// needsEscape determines whether the input string needs to be escaped or not, -// without doing any allocations. 
-func needsEscape(s string) bool { - for _, r := range s { - if !strconv.IsPrint(r) || r == '\\' || r == '"' { - return true - } - } - return false -} - -func isEmpty(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return m.MarshalLog() -} - -func invokeStringer(s fmt.Stringer) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return s.String() -} - -func invokeError(e error) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return e.Error() -} - -// Caller represents the original call site for a log line, after considering -// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and -// Line fields will always be provided, while the Func field is optional. -// Users can set the render hook fields in Options to examine logged key-value -// pairs, one of which will be {"caller", Caller} if the Options.LogCaller -// field is enabled for the given MessageClass. -type Caller struct { - // File is the basename of the file for this call site. - File string `json:"file"` - // Line is the line number in the file for this call site. - Line int `json:"line"` - // Func is the function name for this call site, or empty if - // Options.LogCallerFunc is not enabled. - Func string `json:"function,omitempty"` -} - -func (f Formatter) caller() Caller { - // +1 for this frame, +1 for Info/Error. - pc, file, line, ok := runtime.Caller(f.depth + 2) - if !ok { - return Caller{"", 0, ""} - } - fn := "" - if f.opts.LogCallerFunc { - if fp := runtime.FuncForPC(pc); fp != nil { - fn = fp.Name() - } - } - - return Caller{filepath.Base(file), line, fn} -} - -const noValue = "" - -func (f Formatter) nonStringKey(v interface{}) string { - return fmt.Sprintf("", f.snippet(v)) -} - -// snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v interface{}) string { - const snipLen = 16 - - snip := f.pretty(v) - if len(snip) > snipLen { - snip = snip[:snipLen] - } - return snip -} - -// sanitize ensures that a list of key-value pairs has a value for every key -// (adding a value if needed) and that each key is a string (substituting a key -// if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - _, ok := kvList[i].(string) - if !ok { - kvList[i] = f.nonStringKey(kvList[i]) - } - } - return kvList -} - -// Init configures this Formatter from runtime info, such as the call depth -// imposed by logr itself. -// Note that this receiver is a pointer, so depth can be saved. -func (f *Formatter) Init(info logr.RuntimeInfo) { - f.depth += info.CallDepth -} - -// Enabled checks whether an info message at the given level should be logged. 
-func (f Formatter) Enabled(level int) bool { - return level <= f.opts.Verbosity -} - -// GetDepth returns the current depth of this Formatter. This is useful for -// implementations which do their own caller attribution. -func (f Formatter) GetDepth() int { - return f.depth -} - -// FormatInfo renders an Info log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Info { - args = append(args, "caller", f.caller()) - } - args = append(args, "level", level, "msg", msg) - return prefix, f.render(args, kvList) -} - -// FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Error { - args = append(args, "caller", f.caller()) - } - args = append(args, "msg", msg) - var loggableErr interface{} - if err != nil { - loggableErr = err.Error() - } - args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) -} - -// AddName appends the specified name. funcr uses '/' characters to separate -// name elements. Callers should not pass '/' in the provided name string, but -// this library does not actually enforce that. -func (f *Formatter) AddName(name string) { - if len(f.prefix) > 0 { - f.prefix += "/" - } - f.prefix += name -} - -// AddValues adds key-value pairs to the set of saved values to be logged with -// each log line. -func (f *Formatter) AddValues(kvList []interface{}) { - // Three slice args forces a copy. - n := len(f.values) - f.values = append(f.values[:n:n], kvList...) - - vals := f.values - if hook := f.opts.RenderValuesHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - - // Pre-render values, so we don't have to do it on each Info/Error call. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys - f.valuesStr = buf.String() -} - -// AddCallDepth increases the number of stack-frames to skip when attributing -// the log line to a file and line. -func (f *Formatter) AddCallDepth(depth int) { - f.depth += depth -} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/go-logr/stdr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
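The go-logr packages removed around this point provide logr.Logger implementations: funcr (above) renders structured key=value or JSON lines through an arbitrary write function, and stdr (below) adapts Go's standard log package. A minimal construction sketch, assuming the upstream import paths and the signatures shown in the deleted sources; the messages and key-value pairs are illustrative:

```go
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/logr/funcr"
	"github.com/go-logr/stdr"
)

func main() {
	// stdr: a logr.Logger backed by the standard library's *log.Logger.
	stdr.SetVerbosity(1) // V(0) and V(1) Info calls are emitted; higher levels are dropped
	std := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	std.Info("node joined", "nodeID", "abc123")
	std.V(2).Info("suppressed at verbosity 1")

	// funcr: a logr.Logger that hands each rendered JSON line to an arbitrary function.
	jsonLog := funcr.NewJSON(func(obj string) { os.Stdout.WriteString(obj + "\n") }, funcr.Options{
		LogTimestamp: true,
		LogCaller:    funcr.Error, // add a "caller" key only on Error lines
	})
	jsonLog.Error(errors.New("dial timeout"), "agent reconnect failed", "attempt", 3)
}
```

stdr builds on funcr internally (it embeds funcr.Formatter), which is why the two share their verbosity and caller-logging semantics.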
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md deleted file mode 100644 index 5158667890..0000000000 --- a/vendor/github.com/go-logr/stdr/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal Go logging using logr and Go's standard library - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go deleted file mode 100644 index 93a8aab51b..0000000000 --- a/vendor/github.com/go-logr/stdr/stdr.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package stdr implements github.com/go-logr/logr.Logger in terms of -// Go's standard log package. -package stdr - -import ( - "log" - "os" - - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" -) - -// The global verbosity level. See SetVerbosity(). -var globalVerbosity int - -// SetVerbosity sets the global level against which all info logs will be -// compared. If this is greater than or equal to the "V" of the logger, the -// message will be logged. A higher value here means more logs will be written. -// The previous verbosity value is returned. This is not concurrent-safe - -// callers must be sure to call it from only one goroutine. -func SetVerbosity(v int) int { - old := globalVerbosity - globalVerbosity = v - return old -} - -// New returns a logr.Logger which is implemented by Go's standard log package, -// or something like it. If std is nil, this will use a default logger -// instead. -// -// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -func New(std StdLogger) logr.Logger { - return NewWithOptions(std, Options{}) -} - -// NewWithOptions returns a logr.Logger which is implemented by Go's standard -// log package, or something like it. See New for details. -func NewWithOptions(std StdLogger, opts Options) logr.Logger { - if std == nil { - // Go's log.Default() is only available in 1.16 and higher. - std = log.New(os.Stderr, "", log.LstdFlags) - } - - if opts.Depth < 0 { - opts.Depth = 0 - } - - fopts := funcr.Options{ - LogCaller: funcr.MessageClass(opts.LogCaller), - } - - sl := &logger{ - Formatter: funcr.NewFormatter(fopts), - std: std, - } - - // For skipping our own logger.Info/Error. - sl.Formatter.AddCallDepth(1 + opts.Depth) - - return logr.New(sl) -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // Depth biases the assumed number of call frames to the "true" caller. - // This is useful when the calling code calls a function which then calls - // stdr (e.g. a logging shim to another API). Values less than zero will - // be treated as zero. - Depth int - - // LogCaller tells stdr to add a "caller" key to some or all log lines. 
- // Go's log package has options to log this natively, too. - LogCaller MessageClass - - // TODO: add an option to log the date/time -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// StdLogger is the subset of the Go stdlib log.Logger API that is needed for -// this adapter. -type StdLogger interface { - // Output is the same as log.Output and log.Logger.Output. - Output(calldepth int, logline string) error -} - -type logger struct { - funcr.Formatter - std StdLogger -} - -var _ logr.LogSink = &logger{} -var _ logr.CallDepthLogSink = &logger{} - -func (l logger) Enabled(level int) bool { - return globalVerbosity >= level -} - -func (l logger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l logger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l logger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -// Underlier exposes access to the underlying logging implementation. Since -// callers only have a logr.Logger, they have to know which implementation is -// in use, so this interface is less of an abstraction and more of way to test -// type conversion. -type Underlier interface { - GetUnderlying() StdLogger -} - -// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger -// is itself an interface, the result may or may not be a Go log.Logger. -func (l logger) GetUnderlying() StdLogger { - return l.std -} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE deleted file mode 100644 index 9fdc20fdb6..0000000000 --- a/vendor/github.com/opencontainers/image-spec/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
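For reference, the go-logr/stdr adapter whose sources are deleted just above is typically wired up as in the following minimal sketch. It only uses identifiers visible in the removed files (SetVerbosity, NewWithOptions, Options, the Error message class); the verbosity level, log flags, key/value pairs, and the /etc/swarmd.toml path are illustrative assumptions, not anything prescribed by this patch.

    package main

    import (
    	"log"
    	"os"

    	"github.com/go-logr/stdr"
    )

    func main() {
    	// Emit info logs up to V(1); more verbose messages are dropped.
    	stdr.SetVerbosity(1)

    	// Back the logr.Logger with a standard library *log.Logger and record
    	// caller information for error messages only.
    	logger := stdr.NewWithOptions(
    		log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile),
    		stdr.Options{LogCaller: stdr.Error},
    	)

    	// The key/value pairs and the path below are illustrative only.
    	logger.Info("starting", "component", "swarmd")
    	logger.V(1).Info("verbose detail", "listeners", 2)
    	logger.Error(os.ErrNotExist, "config not found", "path", "/etc/swarmd.toml")
    }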
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2016 The Linux Foundation. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go deleted file mode 100644 index 581cf7cdfa..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -const ( - // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). - AnnotationCreated = "org.opencontainers.image.created" - - // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). - AnnotationAuthors = "org.opencontainers.image.authors" - - // AnnotationURL is the annotation key for the URL to find more information on the image. - AnnotationURL = "org.opencontainers.image.url" - - // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. - AnnotationDocumentation = "org.opencontainers.image.documentation" - - // AnnotationSource is the annotation key for the URL to get source code for building the image. - AnnotationSource = "org.opencontainers.image.source" - - // AnnotationVersion is the annotation key for the version of the packaged software. - // The version MAY match a label or tag in the source code repository. - // The version MAY be Semantic versioning-compatible. - AnnotationVersion = "org.opencontainers.image.version" - - // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. - AnnotationRevision = "org.opencontainers.image.revision" - - // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. - AnnotationVendor = "org.opencontainers.image.vendor" - - // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. - AnnotationLicenses = "org.opencontainers.image.licenses" - - // AnnotationRefName is the annotation key for the name of the reference for a target. - // SHOULD only be considered valid when on descriptors on `index.json` within image layout. - AnnotationRefName = "org.opencontainers.image.ref.name" - - // AnnotationTitle is the annotation key for the human-readable title of the image. - AnnotationTitle = "org.opencontainers.image.title" - - // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. - AnnotationDescription = "org.opencontainers.image.description" - - // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. - AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" - - // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. - AnnotationBaseImageName = "org.opencontainers.image.base.name" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go deleted file mode 100644 index 36b0aeb8f1..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. -type ImageConfig struct { - // User defines the username or UID which the process in the container should run as. - User string `json:"User,omitempty"` - - // ExposedPorts a set of ports to expose from a container running this image. - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - - // Env is a list of environment variables to be used in a container. - Env []string `json:"Env,omitempty"` - - // Entrypoint defines a list of arguments to use as the command to execute when the container starts. - Entrypoint []string `json:"Entrypoint,omitempty"` - - // Cmd defines the default arguments to the entrypoint of the container. - Cmd []string `json:"Cmd,omitempty"` - - // Volumes is a set of directories describing where the process is likely write data specific to a container instance. - Volumes map[string]struct{} `json:"Volumes,omitempty"` - - // WorkingDir sets the current working directory of the entrypoint process in the container. - WorkingDir string `json:"WorkingDir,omitempty"` - - // Labels contains arbitrary metadata for the container. - Labels map[string]string `json:"Labels,omitempty"` - - // StopSignal contains the system call signal that will be sent to the container to exit. - StopSignal string `json:"StopSignal,omitempty"` - - // ArgsEscaped - // - // Deprecated: This field is present only for legacy compatibility with - // Docker and should not be used by new image builders. It is used by Docker - // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, - // contains only a single element array, that is a pre-escaped, and combined - // into a single string `CommandLine`. If `true` the value in `Entrypoint` or - // `Cmd` should be used as-is to avoid double escaping. - // https://github.com/opencontainers/image-spec/pull/892 - ArgsEscaped bool `json:"ArgsEscaped,omitempty"` -} - -// RootFS describes a layer content addresses -type RootFS struct { - // Type is the type of the rootfs. - Type string `json:"type"` - - // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. - DiffIDs []digest.Digest `json:"diff_ids"` -} - -// History describes the history of a layer. -type History struct { - // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // CreatedBy is the command which created the layer. - CreatedBy string `json:"created_by,omitempty"` - - // Author is the author of the build point. - Author string `json:"author,omitempty"` - - // Comment is a custom message set when creating the layer. - Comment string `json:"comment,omitempty"` - - // EmptyLayer is used to mark if the history item created a filesystem diff. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. 
- Author string `json:"author,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - Platform - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // RootFS references the layer content addresses used by the image. - RootFS RootFS `json:"rootfs"` - - // History describes the history of each layer. - History []History `json:"history,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go deleted file mode 100644 index 1881b11814..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import digest "github.com/opencontainers/go-digest" - -// Descriptor describes the disposition of targeted content. -// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype -// when marshalled to JSON. -type Descriptor struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // Digest is the digest of the targeted content. - Digest digest.Digest `json:"digest"` - - // Size specifies the size in bytes of the blob. - Size int64 `json:"size"` - - // URLs specifies a list of URLs from which this object MAY be downloaded - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Data is an embedding of the targeted content. This is encoded as a base64 - // string when marshalled to JSON (automatically, by encoding/json). If - // present, Data can be used directly to avoid fetching the targeted content. - Data []byte `json:"data,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // - // This should only be used when referring to a manifest. - Platform *Platform `json:"platform,omitempty"` - - // ArtifactType is the IANA media type of this artifact. - ArtifactType string `json:"artifactType,omitempty"` -} - -// Platform describes the platform which the image in the manifest runs on. -type Platform struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64le`. - Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). 
- OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `v7` to specify ARMv7 when architecture is `arm`. - Variant string `json:"variant,omitempty"` -} - -// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. -var DescriptorEmptyJSON = Descriptor{ - MediaType: MediaTypeEmptyJSON, - Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, - Size: 2, - Data: []byte(`{}`), -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go deleted file mode 100644 index e2bed9d4e4..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Index references manifests for various platforms. -// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. -type Index struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []Descriptor `json:"manifests"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go deleted file mode 100644 index c5503cb305..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -const ( - // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout - ImageLayoutFile = "oci-layout" - // ImageLayoutVersion is the version of ImageLayout - ImageLayoutVersion = "1.0.0" - // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout - ImageIndexFile = "index.json" - // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout - ImageBlobsDir = "blobs" -) - -// ImageLayout is the structure in the "oci-layout" file, found in the root -// of an OCI Image-layout directory. -type ImageLayout struct { - Version string `json:"imageLayoutVersion"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go deleted file mode 100644 index 26fec52a6b..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. -type Manifest struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - - // Config references a configuration object for a container, by digest. - // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config Descriptor `json:"config"` - - // Layers is an indexed list of layers referenced by the manifest. - Layers []Descriptor `json:"layers"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go deleted file mode 100644 index 892ba3de9d..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // MediaTypeDescriptor specifies the media type for a content descriptor. - MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" - - // MediaTypeLayoutHeader specifies the media type for the oci-layout. - MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" - - // MediaTypeImageManifest specifies the media type for an image manifest. - MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" - - // MediaTypeImageLayer is the media type used for layers referenced by the manifest. - MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" - - // MediaTypeImageLayerGzip is the media type used for gzipped layers - // referenced by the manifest. - MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" - - // MediaTypeImageLayerZstd is the media type used for zstd compressed - // layers referenced by the manifest. - MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" - - // MediaTypeImageLayerNonDistributable is the media type for layers referenced by - // the manifest but with distribution restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - // MediaTypeImageLayerNonDistributableGzip is the media type for - // gzipped layers referenced by the manifest but with distribution - // restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - - // MediaTypeImageLayerNonDistributableZstd is the media type for zstd - // compressed layers referenced by the manifest but with distribution - // restrictions. - // - // Deprecated: Non-distributable layers are deprecated, and not recommended - // for future use. Implementations SHOULD NOT produce new non-distributable - // layers. - // https://github.com/opencontainers/image-spec/pull/965 - MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - - // MediaTypeImageConfig specifies the media type for the image configuration. 
- MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go deleted file mode 100644 index 11e09b5846..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.5" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go deleted file mode 100644 index 58a1510f33..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
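For reference, the opencontainers/image-spec specs-go types removed earlier in this patch (Manifest, Descriptor, the media-type constants, the annotation keys, and specs.Versioned) compose roughly as in the sketch below. The digests and sizes are placeholders reused from the empty-JSON descriptor shown in the deleted descriptor.go, and the annotation value is illustrative; this is not a real manifest.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	digest "github.com/opencontainers/go-digest"
    	specs "github.com/opencontainers/image-spec/specs-go"
    	v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	// Placeholder content address: the digest of `{}`, as used by
    	// DescriptorEmptyJSON in the deleted descriptor.go.
    	placeholder := digest.Digest("sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a")

    	manifest := v1.Manifest{
    		Versioned: specs.Versioned{SchemaVersion: 2},
    		MediaType: v1.MediaTypeImageManifest,
    		Config: v1.Descriptor{
    			MediaType: v1.MediaTypeImageConfig,
    			Digest:    placeholder,
    			Size:      2,
    		},
    		Layers: []v1.Descriptor{{
    			MediaType: v1.MediaTypeImageLayerGzip,
    			Digest:    placeholder,
    			Size:      2,
    		}},
    		Annotations: map[string]string{
    			v1.AnnotationTitle: "example", // illustrative value
    		},
    	}

    	out, err := json.MarshalIndent(manifest, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    }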
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go deleted file mode 100644 index 0816b9f5da..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change -// the global propagator, the DefaultClient might still be using the old one -var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} - -// Get is a convenient replacement for http.Get that adds a span around the request. -func Get(ctx context.Context, url string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - return DefaultClient.Do(req) -} - -// Head is a convenient replacement for http.Head that adds a span around the request. -func Head(ctx context.Context, url string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil) - if err != nil { - return nil, err - } - return DefaultClient.Do(req) -} - -// Post is a convenient replacement for http.Post that adds a span around the request. -func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", contentType) - return DefaultClient.Do(req) -} - -// PostForm is a convenient replacement for http.PostForm that adds a span around the request. -func PostForm(ctx context.Context, url string, data url.Values) (resp *http.Response, err error) { - return Post(ctx, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go deleted file mode 100644 index dcc59449e4..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Attribute keys that can be added to a span. -const ( - ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read - ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) - WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written - WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) -) - -// Server HTTP metrics -const ( - RequestCount = "http.server.request_count" // Incoming request count total - RequestContentLength = "http.server.request_content_length" // Incoming request bytes total - ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total - ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds -) - -// Filter is a predicate used to determine whether a given http.request should -// be traced. A Filter must return true if the request should be traced. -type Filter func(*http.Request) bool - -func newTracer(tp trace.TracerProvider) trace.Tracer { - return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion())) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go deleted file mode 100644 index 0c18c11104..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "context" - "net/http" - "net/http/httptrace" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -const ( - instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" -) - -// config represents the configuration options available for the http.Handler -// and http.Transport types. -type config struct { - Tracer trace.Tracer - Meter metric.Meter - Propagators propagation.TextMapPropagator - SpanStartOptions []trace.SpanStartOption - ReadEvent bool - WriteEvent bool - Filters []Filter - SpanNameFormatter func(string, *http.Request) string - ClientTrace func(context.Context) *httptrace.ClientTrace - - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider -} - -// Option interface used for setting optional config properties. 
-type Option interface { - apply(*config) -} - -type optionFunc func(*config) - -func (o optionFunc) apply(c *config) { - o(c) -} - -// newConfig creates a new config struct and applies opts to it. -func newConfig(opts ...Option) *config { - c := &config{ - Propagators: otel.GetTextMapPropagator(), - MeterProvider: global.GetMeterProvider(), - } - for _, opt := range opts { - opt.apply(c) - } - - // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. - if c.TracerProvider != nil { - c.Tracer = newTracer(c.TracerProvider) - } - - c.Meter = c.MeterProvider.Meter( - instrumentationName, - metric.WithInstrumentationVersion(SemVersion()), - ) - - return c -} - -// WithTracerProvider specifies a tracer provider to use for creating a tracer. -// If none is specified, the global provider is used. -func WithTracerProvider(provider trace.TracerProvider) Option { - return optionFunc(func(cfg *config) { - if provider != nil { - cfg.TracerProvider = provider - } - }) -} - -// WithMeterProvider specifies a meter provider to use for creating a meter. -// If none is specified, the global provider is used. -func WithMeterProvider(provider metric.MeterProvider) Option { - return optionFunc(func(cfg *config) { - if provider != nil { - cfg.MeterProvider = provider - } - }) -} - -// WithPublicEndpoint configures the Handler to link the span with an incoming -// span context. If this option is not provided, then the association is a child -// association instead of a link. -func WithPublicEndpoint() Option { - return optionFunc(func(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, trace.WithNewRoot()) - }) -} - -// WithPropagators configures specific propagators. If this -// option isn't specified, then the global TextMapPropagator is used. -func WithPropagators(ps propagation.TextMapPropagator) Option { - return optionFunc(func(c *config) { - if ps != nil { - c.Propagators = ps - } - }) -} - -// WithSpanOptions configures an additional set of -// trace.SpanOptions, which are applied to each new span. -func WithSpanOptions(opts ...trace.SpanStartOption) Option { - return optionFunc(func(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, opts...) - }) -} - -// WithFilter adds a filter to the list of filters used by the handler. -// If any filter indicates to exclude a request then the request will not be -// traced. All filters must allow a request to be traced for a Span to be created. -// If no filters are provided then all requests are traced. -// Filters will be invoked for each processed request, it is advised to make them -// simple and fast. -func WithFilter(f Filter) Option { - return optionFunc(func(c *config) { - c.Filters = append(c.Filters, f) - }) -} - -type event int - -// Different types of events that can be recorded, see WithMessageEvents -const ( - ReadEvents event = iota - WriteEvents -) - -// WithMessageEvents configures the Handler to record the specified events -// (span.AddEvent) on spans. By default only summary attributes are added at the -// end of the request. 
-// -// Valid events are: -// * ReadEvents: Record the number of bytes read after every http.Request.Body.Read -// using the ReadBytesKey -// * WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write -// using the WriteBytesKey -func WithMessageEvents(events ...event) Option { - return optionFunc(func(c *config) { - for _, e := range events { - switch e { - case ReadEvents: - c.ReadEvent = true - case WriteEvents: - c.WriteEvent = true - } - } - }) -} - -// WithSpanNameFormatter takes a function that will be called on every -// request and the returned string will become the Span Name -func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { - return optionFunc(func(c *config) { - c.SpanNameFormatter = f - }) -} - -// WithClientTrace takes a function that returns client trace instance that will be -// applied to the requests sent through the otelhttp Transport. -func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { - return optionFunc(func(c *config) { - c.ClientTrace = f - }) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go deleted file mode 100644 index 38c7f01c71..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package otelhttp provides an http.Handler and functions that are intended -// to be used to add tracing by wrapping existing handlers (with Handler) and -// routes WithRouteTag. -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go deleted file mode 100644 index dbba925dfa..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otelhttp - -import ( - "io" - "net/http" - "time" - - "github.com/felixge/httpsnoop" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" - "go.opentelemetry.io/otel/trace" -) - -var _ http.Handler = &Handler{} - -// Handler is http middleware that corresponds to the http.Handler interface and -// is designed to wrap a http.Mux (or equivalent), while individual routes on -// the mux are wrapped with WithRouteTag. A Handler will add various attributes -// to the span using the attribute.Keys defined in this package. -type Handler struct { - operation string - handler http.Handler - - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - counters map[string]metric.Int64Counter - valueRecorders map[string]metric.Float64Histogram -} - -func defaultHandlerFormatter(operation string, _ *http.Request) string { - return operation -} - -// NewHandler wraps the passed handler, functioning like middleware, in a span -// named after the operation and with any provided Options. -func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { - h := Handler{ - handler: handler, - operation: operation, - } - - defaultOpts := []Option{ - WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), - WithSpanNameFormatter(defaultHandlerFormatter), - } - - c := newConfig(append(defaultOpts, opts...)...) - h.configure(c) - h.createMeasures() - - return &h -} - -func (h *Handler) configure(c *config) { - h.tracer = c.Tracer - h.meter = c.Meter - h.propagators = c.Propagators - h.spanStartOptions = c.SpanStartOptions - h.readEvent = c.ReadEvent - h.writeEvent = c.WriteEvent - h.filters = c.Filters - h.spanNameFormatter = c.SpanNameFormatter -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - -func (h *Handler) createMeasures() { - h.counters = make(map[string]metric.Int64Counter) - h.valueRecorders = make(map[string]metric.Float64Histogram) - - requestBytesCounter, err := h.meter.NewInt64Counter(RequestContentLength) - handleErr(err) - - responseBytesCounter, err := h.meter.NewInt64Counter(ResponseContentLength) - handleErr(err) - - serverLatencyMeasure, err := h.meter.NewFloat64Histogram(ServerLatency) - handleErr(err) - - h.counters[RequestContentLength] = requestBytesCounter - h.counters[ResponseContentLength] = responseBytesCounter - h.valueRecorders[ServerLatency] = serverLatencyMeasure -} - -// ServeHTTP serves HTTP requests (http.Handler) -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - requestStartTime := time.Now() - for _, f := range h.filters { - if !f(r) { - // Simply pass through to the handler if a filter rejects the request - h.handler.ServeHTTP(w, r) - return - } - } - - opts := append([]trace.SpanStartOption{ - trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...), - trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...), - trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...), - }, h.spanStartOptions...) 
// start with the configured options - - tracer := h.tracer - - if tracer == nil { - if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { - tracer = newTracer(span.TracerProvider()) - } else { - tracer = newTracer(otel.GetTracerProvider()) - } - } - - ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) - ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) - defer span.End() - - readRecordFunc := func(int64) {} - if h.readEvent { - readRecordFunc = func(n int64) { - span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) - } - } - - var bw bodyWrapper - // if request body is nil we don't want to mutate the body as it will affect - // the identity of it in a unforeseeable way because we assert ReadCloser - // fullfills a certain interface and it is indeed nil. - if r.Body != nil { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw - } - - writeRecordFunc := func(int64) {} - if h.writeEvent { - writeRecordFunc = func(n int64) { - span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) - } - } - - rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} - - // Wrap w to use our ResponseWriter methods while also exposing - // other interfaces that w may implement (http.CloseNotifier, - // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). - - w = httpsnoop.Wrap(w, httpsnoop.Hooks{ - Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { - return rww.Header - }, - Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { - return rww.Write - }, - WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { - return rww.WriteHeader - }, - }) - - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) - - h.handler.ServeHTTP(w, r.WithContext(ctx)) - - setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) - - // Add metrics - attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...) - h.counters[RequestContentLength].Add(ctx, bw.read, attributes...) - h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...) - - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - - h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...) -} - -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(statusCode)) - } - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) -} - -// WithRouteTag annotates a span with the provided route name using the -// RouteKey Tag. 
-func WithRouteTag(route string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - span := trace.SpanFromContext(r.Context()) - span.SetAttributes(semconv.HTTPRouteKey.String(route)) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go deleted file mode 100644 index 7b7d1c0ace..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/attribute" -) - -// Labeler is used to allow instrumented HTTP handlers to add custom attributes to -// the metrics recorded by the net/http instrumentation. -type Labeler struct { - mu sync.Mutex - attributes []attribute.KeyValue -} - -// Add attributes to a Labeler. -func (l *Labeler) Add(ls ...attribute.KeyValue) { - l.mu.Lock() - defer l.mu.Unlock() - l.attributes = append(l.attributes, ls...) -} - -// Get returns a copy of the attributes added to the Labeler. -func (l *Labeler) Get() []attribute.KeyValue { - l.mu.Lock() - defer l.mu.Unlock() - ret := make([]attribute.KeyValue, len(l.attributes)) - copy(ret, l.attributes) - return ret -} - -type labelerContextKeyType int - -const lablelerContextKey labelerContextKeyType = 0 - -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) -} - -// LabelerFromContext retrieves a Labeler instance from the provided context if -// one is available. If no Labeler was found in the provided context a new, empty -// Labeler is returned and the second return value is false. In this case it is -// safe to use the Labeler but any attributes added to it will not be used. -func LabelerFromContext(ctx context.Context) (*Labeler, bool) { - l, ok := ctx.Value(lablelerContextKey).(*Labeler) - if !ok { - l = &Labeler{} - } - return l, ok -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go deleted file mode 100644 index 121ad99b0a..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "context" - "io" - "net/http" - "net/http/httptrace" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" - "go.opentelemetry.io/otel/trace" -) - -// Transport implements the http.RoundTripper interface and wraps -// outbound HTTP(S) requests with a span. -type Transport struct { - rt http.RoundTripper - - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace -} - -var _ http.RoundTripper = &Transport{} - -// NewTransport wraps the provided http.RoundTripper with one that -// starts a span and injects the span context into the outbound request headers. -// -// If the provided http.RoundTripper is nil, http.DefaultTransport will be used -// as the base http.RoundTripper -func NewTransport(base http.RoundTripper, opts ...Option) *Transport { - if base == nil { - base = http.DefaultTransport - } - - t := Transport{ - rt: base, - } - - defaultOpts := []Option{ - WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), - WithSpanNameFormatter(defaultTransportFormatter), - } - - c := newConfig(append(defaultOpts, opts...)...) - t.applyConfig(c) - - return &t -} - -func (t *Transport) applyConfig(c *config) { - t.tracer = c.Tracer - t.propagators = c.Propagators - t.spanStartOptions = c.SpanStartOptions - t.filters = c.Filters - t.spanNameFormatter = c.SpanNameFormatter - t.clientTrace = c.ClientTrace -} - -func defaultTransportFormatter(_ string, r *http.Request) string { - return "HTTP " + r.Method -} - -// RoundTrip creates a Span and propagates its context via the provided request's headers -// before handing the request to the configured base RoundTripper. The created span will -// end when the response body is closed or when a read from the body returns io.EOF. -func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { - for _, f := range t.filters { - if !f(r) { - // Simply pass through to the base RoundTripper if a filter rejects the request - return t.rt.RoundTrip(r) - } - } - - tracer := t.tracer - - if tracer == nil { - if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { - tracer = newTracer(span.TracerProvider()) - } else { - tracer = newTracer(otel.GetTracerProvider()) - } - } - - opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options - - ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) - - if t.clientTrace != nil { - ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) - } - - r = r.WithContext(ctx) - span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...) - t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) - - res, err := t.rt.RoundTrip(r) - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, err.Error()) - span.End() - return res, err - } - - span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode)) - res.Body = newWrappedBody(span, res.Body) - - return res, err -} - -// newWrappedBody returns a new and appropriately scoped *wrappedBody as an -// io.ReadCloser. 
If the passed body implements io.Writer, the returned value -// will implement io.ReadWriteCloser. -func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { - // The successful protocol switch responses will have a body that - // implement an io.ReadWriteCloser. Ensure this interface type continues - // to be satisfied if that is the case. - if _, ok := body.(io.ReadWriteCloser); ok { - return &wrappedBody{span: span, body: body} - } - - // Remove the implementation of the io.ReadWriteCloser and only implement - // the io.ReadCloser. - return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}} -} - -// wrappedBody is the response body type returned by the transport -// instrumentation to complete a span. Errors encountered when using the -// response body are recorded in span tracking the response. -// -// The span tracking the response is ended when this body is closed. -// -// If the response body implements the io.Writer interface (i.e. for -// successful protocol switches), the wrapped body also will. -type wrappedBody struct { - span trace.Span - body io.ReadCloser -} - -var _ io.ReadWriteCloser = &wrappedBody{} - -func (wb *wrappedBody) Write(p []byte) (int, error) { - // This will not panic given the guard in newWrappedBody. - n, err := wb.body.(io.Writer).Write(p) - if err != nil { - wb.span.RecordError(err) - wb.span.SetStatus(codes.Error, err.Error()) - } - return n, err -} - -func (wb *wrappedBody) Read(b []byte) (int, error) { - n, err := wb.body.Read(b) - - switch err { - case nil: - // nothing to do here but fall through to the return - case io.EOF: - wb.span.End() - default: - wb.span.RecordError(err) - wb.span.SetStatus(codes.Error, err.Error()) - } - return n, err -} - -func (wb *wrappedBody) Close() error { - wb.span.End() - return wb.body.Close() -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go deleted file mode 100644 index 63402819ca..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -// Version is the current release version of the otelhttp instrumentation. -func Version() string { - return "0.29.0" - // This string is updated by the pre_release.sh script during release -} - -// SemVersion is the semantic version to be supplied to tracer/meter creation. 
-func SemVersion() string { - return "semver:" + Version() -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index ec787c820a..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp - -import ( - "context" - "io" - "net/http" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read += n1 - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the returned statusCode -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. 
-type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if w.wroteHeader { - return - } - w.wroteHeader = true - w.statusCode = statusCode - w.props.Inject(w.ctx, propagation.HeaderCarrier(w.Header())) - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes deleted file mode 100644 index 314766e91b..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -* text=auto eol=lf -*.{cmd,[cC][mM][dD]} text eol=crlf -*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore deleted file mode 100644 index 759cf53e00..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -.DS_Store -Thumbs.db - -.tools/ -.idea/ -.vscode/ -*.iml -*.so -coverage.* - -gen/ - -/example/fib/fib -/example/jaeger/jaeger -/example/namedtracer/namedtracer -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/prom-collector/prom-collector -/example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml deleted file mode 100644 index 7a5fdc07ab..0000000000 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ /dev/null @@ -1,47 +0,0 @@ -# See https://github.com/golangci/golangci-lint#config-file -run: - issues-exit-code: 1 #Default - tests: true #Default - -linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. 
- enable: - - deadcode - - errcheck - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - -issues: - exclude-rules: - # helpers in tests often (rightfully) pass a *testing.T as their first argument - - path: _test\.go - text: "context.Context should be the first parameter of a function" - linters: - - revive - # Yes, they are, but it's okay in a test - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - -linters-settings: - misspell: - locale: US - ignore-words: - - cancelled - goimports: - local-prefixes: go.opentelemetry.io diff --git a/vendor/go.opentelemetry.io/otel/.markdown-link.json b/vendor/go.opentelemetry.io/otel/.markdown-link.json deleted file mode 100644 index f222ad89c3..0000000000 --- a/vendor/go.opentelemetry.io/otel/.markdown-link.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^http(s)?://localhost" - } - ], - "replacementPatterns": [ - { - "pattern": "^/registry", - "replacement": "https://opentelemetry.io/registry" - } - ], - "retryOn429": true, - "retryCount": 5, - "fallbackRetryDelay": "30s" -} diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml deleted file mode 100644 index 3202496c35..0000000000 --- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Default state for all rules -default: true - -# ul-style -MD004: false - -# hard-tabs -MD010: false - -# line-length -MD013: false - -# no-duplicate-header -MD024: - siblings_only: true - -#single-title -MD025: false - -# ol-prefix -MD029: - style: ordered - -# no-inline-html -MD033: false - -# fenced-code-language -MD040: false - diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md deleted file mode 100644 index 42dada4905..0000000000 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ /dev/null @@ -1,1733 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.4.1] - 2022-02-16 - -### Fixed - -- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) - -## [1.4.0] - 2022-02-11 - -### Added - -- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) -- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. - To enable use a logger with Verbosity (V level) `>=1`. (#2500) -- Added support to configure the batch span-processor with environment variables. - The following environment variables are used. (#2515) - - `OTEL_BSP_SCHEDULE_DELAY` - - `OTEL_BSP_EXPORT_TIMEOUT` - - `OTEL_BSP_MAX_QUEUE_SIZE`. - - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` - -### Changed - -- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) - -### Deprecated - -- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. - Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) -- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) - -### Fixed - -- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) -- Fix UDP packets overflowing with Jaeger payloads. 
(#2489, #2512) -- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) -- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) -- W3C baggage will now decode urlescaped values. (#2529) -- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) -- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. - Instead of dropping the least-recently-used attribute, the last added attribute is dropped. - This drop order still only applies to attributes with unique keys not already contained in the span. - If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) - -### Removed - -- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) - - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) - - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) - - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) - -## [1.3.0] - 2021-12-10 - -### ⚠️ Notice ⚠️ - -We have updated the project minimum supported Go version to 1.16 - -### Added - -- Added an internal Logger. - This can be used by the SDK and API to provide users with feedback of the internal state. - To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343) -- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) -- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) - -### Changed - -- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) -- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) -- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. 
(#2371) - -### Fixed - -- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. - Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP__ENDPOINT` environment variable is now used without modification of the path. - When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) -- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) -- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) - -### Deprecated - -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) - -### Removed - -- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) -- Remove the metric Bound Instruments interface and implementations. (#2399) -- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) -- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) - -## [1.2.0] - 2021-11-12 - -### Changed - -- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) -- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) -- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: - - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` - - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. - - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) -- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) -- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) -- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) - -## [1.1.0] - 2021-10-27 - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. 
(#2002) -- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. - The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) -- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. - The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) -- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. - The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322) - - When upgrading from the `semconv/v1.4.0` package note the following name changes: - - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` - - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` - - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` - - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` - - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` - - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` - -### Changed - -- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). - -### Fixed - -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) -- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) - -## [1.0.1] - 2021-10-01 - -### Fixed - -- json stdout exporter no longer crashes due to concurrency bug. (#2265) - -## [Metrics 0.24.0] - 2021-10-01 - -### Changed - -- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) -- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) - - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. - - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. - -## [1.0.0] - 2021-09-20 - -This is the first stable release for the project. -This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). - -### Added - -- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) - -### Fixed - -- Slice-valued attributes can correctly be used as map keys. (#2223) - -### Removed - -- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) -- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) -- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) -- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. - Use the typed functions and methods added to the package instead. (#2235) - - The `Key.Array` method is removed. - - The `Array` function is removed. - - The `Any` function is removed. - - The `ArrayValue` function is removed. - - The `AsArray` function is removed. 
- -## [1.0.0-RC3] - 2021-09-02 - -### Added - -- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) -- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) -- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) - - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. -- Added the `go.opentelemetry.io/otel/example/fib` example package. - Included is an example application that computes Fibonacci numbers. (#2203) - -### Changed - -- Metric instruments have been renamed to match the (feature-frozen) metric API specification: - - ValueRecorder becomes Histogram - - ValueObserver becomes Gauge - - SumObserver becomes CounterObserver - - UpDownSumObserver becomes UpDownCounterObserver - The API exported from this project is still considered experimental. (#2202) -- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) -- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) -- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) -- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) - -### Deprecated - -- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. - All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. - The functions from that package should be used instead. (#2166) -- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. - Use the typed `*Slice` functions and types added to the package instead. (#2162) -- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. - Use the typed functions instead. (#2181) -- The `go.opentelemetry.io/otel/oteltest` package is deprecated. - The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) - -### Removed - -- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) - -### Fixed - -- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) -- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. 
(#2160, #2140) -- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) -- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) -- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) -- Fixed typos in resources.go. (#2201) - -## [1.0.0-RC2] - 2021-07-26 - -### Added - -- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) -- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) -- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) -- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115) -- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. - This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. - For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) -- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/skd/trace/tracetest` package. - This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) - -### Changed - -- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) -- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) - -### Deprecated - -- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) -- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) -- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. - Use the `trace.ParseTraceState` function instead. (#2122) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) -- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. - The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097) -- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. 
(#2095) -- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) - -### Fixed - -- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) -- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) -- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) -- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. - This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) -- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) -- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) - -## [Experimental Metrics v0.22.0] - 2021-07-19 - -### Added - -- Adds HTTP support for OTLP metrics exporter. (#2022) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) - -## [1.0.0-RC1] / 0.21.0 - 2021-06-18 - -With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` -while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules -with major version 0. - -### Added - -- Adds `otlpgrpc.WithRetry`option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832) - - The following status codes are defined as transient errors: - | gRPC Status Code | Description | - | ---------------- | ----------- | - | 1 | Cancelled | - | 4 | Deadline Exceeded | - | 8 | Resource Exhausted | - | 10 | Aborted | - | 10 | Out of Range | - | 14 | Unavailable | - | 15 | Data Loss | -- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) -- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. - This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) -- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) -- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) -- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) -- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. - It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) -- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. - This method returns the number of list-members the `TraceState` holds. (#1937) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. - Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing .(#1922) -- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. 
(#1967) -- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. - These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) -- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) -- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) -- Several builtin resource detectors now correctly populate the schema URL. (#1938) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005) -- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009) - -### Changed - -- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. - `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) -- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) -- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) -- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) -- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) -- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) -- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) -- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. - This method returns the status of a span using the new `Status` type. (#1874) -- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. - This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) -- Unembed `SpanContext` in `Link`. (#1877) -- Generate Semantic conventions from the specification YAML. 
(#1891) -- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) -- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902) -- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) -- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921) -- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) -- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Refactored option types according to the contribution style guide. (#1882) -- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. - This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. - The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) -- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) -- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) -- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931) -- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) -- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) - -### Deprecated - -- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. 
(#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
-
-### Removed
-
-- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
-- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`; use the corresponding `With*()` options to enable them individually. (#1810)
-- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` interface in `go.opentelemetry.io/otel/sdk/trace`.
-  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
-  The `IsRecording` method returns whether the span is recording or not.
-  A read-only span value does not need to know if updates to it will be recorded or not.
-  By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
-- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
-  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
-  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
-- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
-  Using the same tracer that created a span introduces the risk that an instrumentation library's `Tracer` is used by other code instead of its own.
-  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
-  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
-- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
-- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
-- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from the `go.opentelemetry.io/otel/baggage` package.
-  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
-- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
-  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
-- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
-- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
-
-### Fixed
-
-- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`.
(#1850, #1851) -- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) -- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) -- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) -- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) -- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) -- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) - -### Security - -## [0.20.0] - 2021-04-23 - -### Added - -- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) -- Adds semantic conventions for exceptions. (#1492) -- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` - These environment variables can be used to override Jaeger agent hostname and port (#1752) -- Option `ExportTimeout` was added to batch span processor. (#1755) -- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) -- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) -- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) -- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) -- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) -- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) -- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) - - `process.pid` - - `process.executable.name` - - `process.executable.path` - - `process.command_args` - - `process.owner` - - `process.runtime.name` - - `process.runtime.version` - - `process.runtime.description` -- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) -- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811) - - `OTEL_EXPORTER_OTLP_ENDPOINT` - - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` - - `OTEL_EXPORTER_OTLP_HEADERS` - - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` - - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` - - `OTEL_EXPORTER_OTLP_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TIMEOUT` - - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` - - `OTEL_EXPORTER_OTLP_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` -- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. 
(#1821)
-- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
-
-### Fixed
-
-- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
-- The Jaeger exporter now correctly sets tags for the Span status code and message.
-  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
-- The Jaeger exporter now correctly records the names of Span events using the `"event"` key for a tag.
-  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
-- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
-- Fixed typo for default service name in Jaeger Exporter. (#1797)
-- Fix flaky reconnection of the OTLP client connection. (#1527, #1814)
-- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
-  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
-
-### Changed
-
-- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
-- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
-- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
-- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
-- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
-  The Span's SpanContext can now self-identify as being remote or not.
-  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
-- Improve OTLP/gRPC exporter connection errors. (#1737)
-- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
-  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
-- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
-  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
-- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
-  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752)
-- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
-- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
-  It is no longer a conglomerate of the span's own, event, and link attributes that have been dropped. (#1771)
-- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
-- Modify the Zipkin Exporter default service name to use the default resource's serviceName instead of an empty value.
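The `RecordError` entry above, together with the removal of automatic `Error` status in `RecordError` (#1663, noted under 0.19.0 below), changes how errors are reported on spans: `RecordError` adds a semantic-convention `exception` event, and the status is set separately. A hedged sketch, assuming the `go.opentelemetry.io/otel` API of roughly this vintage; the `doWork` function and tracer name are illustrative only:

```go
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

func doWork(ctx context.Context) error {
	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
	defer span.End()

	// Pretend some work failed.
	if err := errors.New("boom"); err != nil {
		// RecordError adds a semantic-convention `exception` event to the span.
		span.RecordError(err)
		// The status is no longer set automatically; set it explicitly.
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}
```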
(#1777) -- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) -- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) -- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) -- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. - This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) -- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) -- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. - The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) -- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830) - -### Removed - -- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` - These environment variables will no longer be used to override values of the Jaeger exporter (#1752) -- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. - This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. - To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) -- Setting error status while recording error with Span from oteltest package. (#1729) -- The concept of a remote and local Span stored in a context is unified to just the current Span. - Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. - If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) -- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. - This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) -- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) -- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. - The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) -- Remove the `WithDisabled` option from the Jaeger exporter. - To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) -- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. 
- These functions for retrieving specific environment variable values are redundant of other internal functions and - are not intended for end user use. (#1824) -- Removed the Jaeger exporter `WithSDKOptions` `Option`. - This option was used to set SDK options for the exporter creation convenience functions. - These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. - If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) -- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. - The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter `Option` type is removed. - The type is no longer used by the exporter to configure anything. - All the previous configurations these options provided were duplicates of SDK configuration. - They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) - -## [0.19.0] - 2021-03-18 - -### Added - -- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) -- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) -- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) -- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) -- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) - -### Changed - -- `trace.SpanContext` is now immutable and has no exported fields. (#1573) - - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. -- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) -- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) -- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) -- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) -- Added non-empty string check for trace `Attribute` keys. (#1659) -- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) -- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) -- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) -- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. 
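The 0.19.0 entries above make `trace.SpanContext` immutable with no exported fields; `trace.NewSpanContext` plus `trace.SpanContextConfig` is now the way to build one when all values are known. A minimal sketch, assuming `go.opentelemetry.io/otel/trace` at or after this release (the `remoteContext` helper is hypothetical):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// remoteContext builds an immutable SpanContext from known values and
// attaches it to a context as the remote parent.
func remoteContext(ctx context.Context, traceIDHex, spanIDHex string) (context.Context, error) {
	traceID, err := trace.TraceIDFromHex(traceIDHex)
	if err != nil {
		return ctx, err
	}
	spanID, err := trace.SpanIDFromHex(spanIDHex)
	if err != nil {
		return ctx, err
	}

	// SpanContext has no exported fields; NewSpanContext + SpanContextConfig
	// is used to construct one where all values are known.
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    traceID,
		SpanID:     spanID,
		TraceFlags: trace.FlagsSampled,
		Remote:     true, // IsRemote() reports this after extraction.
	})
	return trace.ContextWithRemoteSpanContext(ctx, sc), nil
}
```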
(#1693)
-
-### Removed
-
-- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549)
-- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
-- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
-  These are now returned as a `SpanProcessor` interface from their respective constructors. (#1638)
-- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
-- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
-- Removed `jaeger.WithProcess` configuration option. (#1673)
-- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
-
-### Fixed
-
-- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626)
-- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
-- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
-- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
-- Synchronization issues in global trace delegate implementation. (#1686)
-- Reduced excess memory usage by global `TracerProvider`. (#1687)
-
-## [0.18.0] - 2021-03-03
-
-### Added
-
-- Added `resource.Default()` for use with meter and tracer providers. (#1507)
-- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
-- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
-- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
-- Compatibility testing suite in the CI system for the following systems. (#1567)
-  | OS      | Go Version | Architecture |
-  | ------- | ---------- | ------------ |
-  | Ubuntu  | 1.15       | amd64        |
-  | Ubuntu  | 1.14       | amd64        |
-  | Ubuntu  | 1.15       | 386          |
-  | Ubuntu  | 1.14       | 386          |
-  | MacOS   | 1.15       | amd64        |
-  | MacOS   | 1.14       | amd64        |
-  | Windows | 1.15       | amd64        |
-  | Windows | 1.14       | amd64        |
-  | Windows | 1.15       | 386          |
-  | Windows | 1.14       | 386          |
-
-### Changed
-
-- Replaced interface `oteltest.SpanRecorder` with its existing implementation
-  `StandardSpanRecorder`. (#1542)
-- Default span limit values to 128. (#1535)
-- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
-- Renamed the `otel/label` package to `otel/attribute`. (#1541)
-- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
-- Parallelize the CI linting and testing. (#1567)
-- Stagger timestamps in exact aggregator tests. (#1569)
-- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
-- Prevent end-users from implementing some interfaces (#1575)
-
-  ```
-  "otel/exporters/otlp/otlphttp".Option
-  "otel/exporters/stdout".Option
-  "otel/oteltest".Option
-  "otel/trace".TracerOption
-  "otel/trace".SpanOption
-  "otel/trace".EventOption
-  "otel/trace".LifeCycleOption
-  "otel/trace".InstrumentationOption
-  "otel/sdk/resource".Option
-  "otel/sdk/trace".ParentBasedSamplerOption
-  "otel/sdk/trace".ReadOnlySpan
-  "otel/sdk/trace".ReadWriteSpan
-  ```
-
-### Removed
-
-- Removed attempt to resample spans upon changing the span name with `span.SetName()`.
(#1545)
-- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
-- Removed the `test-386` make target.
-  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
-
-### Fixed
-
-- The sequential timing check of timestamps in the stdout exporter is now explicitly set up to be sequential (#1571). (#1572)
-- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
-- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now explicitly set up to be sequential (#1578). (#1579)
-- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
-- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
-
-## [0.17.0] - 2021-02-12
-
-### Changed
-
-- Rename project default branch from `master` to `main`. (#1505)
-- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
-- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
-- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
-- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
-
-### Fixed
-
-- Fixed otlpgrpc reconnection issue.
-- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
-- The otel-collector example now uses the default OTLP receiver port of the collector.
-
-## [0.16.0] - 2021-01-13
-
-### Added
-
-- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
-- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
-- Added documentation about the project's versioning policy. (#1388)
-- Added `NewSplitDriver` for the OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
-- Added CodeQL workflow to GitHub Actions (#1428)
-- Added Gosec workflow to GitHub Actions (#1429)
-- Add new HTTP driver for the OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420)
-- Add an OpenCensus exporter bridge. (#1444)
-
-### Changed
-
-- Rename `internal/testing` to `internal/internaltest`. (#1449)
-- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
-- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
-- Improve span duration accuracy. (#1360)
-- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
-- Remove duplicate checkout from GitHub Actions workflow (#1407)
-- Metric `array` aggregator renamed to `exact` to match its `aggregation.Kind` (#1412)
-- Metric `exact` aggregator includes per-point timestamps (#1412)
-- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
-- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
-- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
-- Unify the endpoint API related to the OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
-- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`.
(1430) -- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) -- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) -- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) -- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) -- Metric Push and Pull Controller components are combined into a single "basic" Controller: - - `WithExporter()` and `Start()` to configure Push behavior - - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior - - `Start()` and `Stop()` accept Context. (#1378) -- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) - -### Removed - -- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) -- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) -- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) - -### Fixed - -- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) - -## [0.15.0] - 2020-12-10 - -### Added - -- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) - -### Changed - -- The Zipkin exporter now uses the Span status code to determine. (#1328) -- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) -- Move the OpenCensus example into `example` directory. (#1359) -- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) -- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) -- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) - -### Fixed - -- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) - -## [0.14.0] - 2020-11-19 - -### Added - -- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) -- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) -- `SpanContextFromContext` returns `SpanContext` from context. (#1255) -- `TraceState` has been added to `SpanContext`. (#1340) -- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323) -- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) -- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) -- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) - -### Changed - -- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) - - `ID` has been renamed to `TraceID`. - - `IDFromHex` has been renamed to `TraceIDFromHex`. - - `EmptySpanContext` is removed. -- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. 
(#1229) -- OTLP Exporter updates: - - supports OTLP v0.6.0 (#1230, #1354) - - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) -- The Sampler is now called on local child spans. (#1233) -- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) -- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. - This matches the returned type and fixes misuse of the term metric. (#1240) -- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) -- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) -- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) -- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) -- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) -- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) -- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) -- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) -- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) -- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) -- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) -- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and - `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) -- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) -- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) -- Updated span collection limits for attribute, event and link counts to 1000 (#1318) -- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) - -### Removed - -- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) -- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. - It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) -- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. - `Tracer` and `Span` from the same module should be used in their place instead. (#1306) -- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. 
(#1350) -- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) - -### Fixed - -- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) -- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) -- Fix condition in `label.Any`. (#1299) -- Fix global `TracerProvider` to pass options to its configured provider. (#1329) -- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) - -## [0.13.0] - 2020-10-08 - -### Added - -- OTLP Metric exporter supports Histogram aggregation. (#1209) -- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) -- A Baggage API to implement the OpenTelemetry specification. (#1217) -- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) - -### Changed - -- Set default propagator to no-op propagator. (#1184) -- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) -- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) -- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. - They now are `Unset`, `Error`, and `Ok`. - They no longer track the gRPC codes. (#1214) -- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) -- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) -- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) - -### Fixed - -- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) - -### Removed - -- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) -- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. - The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) -- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) -- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) -- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) -- Nested array/slice support has been removed. (#1226) - -## [0.12.0] - 2020-09-24 - -### Added - -- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. 
(#1108) -- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. - This addition was made to conform with our project option conventions. (#1155) -- Instrumentation library information was added to the Zipkin exporter. (#1119) -- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) -- More semantic conventions for k8s as resource attributes. (#1167) - -### Changed - -- Add reconnecting udp connection type to Jaeger exporter. - This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. - It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) -- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. - This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) -- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. - This is be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) -- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) -- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. - This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) -- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119) -- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) -- Move `tools` package under `internal`. (#1141) -- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) - The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. -- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) -- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) -- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) -- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to - recommend the use of `newConfig()` instead of `configure()`. (#1163) -- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) -- Ensure exported interface types include parameter names and update the - Style Guide to reflect this styling rule. (#1172) -- Don't consider unset environment variable for resource detection to be an error. (#1170) -- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and - `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. -- ValueObserver instruments use LastValue aggregator by default. (#1165) -- OTLP Metric exporter supports LastValue aggregation. 
(#1165)
-- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
-- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
-- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Renamed `SamplingDecision` values to comply with an OpenTelemetry specification change. (#1192)
-- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
-- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
-- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
-
-### Removed
-
-- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
-  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
-- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey`, from package `go.opentelemetry.io/otel/semconv`. (#1194)
-
-### Fixed
-
-- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
-- Fix missing shutdown processor in otel-collector example. (#1186)
-- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
-
-## [0.11.0] - 2020-08-24
-
-### Added
-
-- Support for exporting array-valued attributes via OTLP. (#992)
-- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
-- Support for filtering metric label sets. (#1047)
-- A dimensionality-reducing metric Processor. (#1057)
-- Integration tests for more OTel Collector Attribute types.
(#1062) -- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) - -### Changed - -- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) -- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) -- Rename `api/testharness` to `api/apitest`. (#1049) -- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) -- Change Metric Processor to merge multiple observations. (#1024) -- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. - This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) -- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) -- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) -- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) -- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) -- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) -- Unify Callback Function Naming. - Rename `*Callback` with `*Func`. (#1061) -- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) -- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. - This interface still supports the export of `SpanData`, but only as a slice. - Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. - If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. - This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) - -### Removed - -- Duplicate, unused API sampler interface. (#999) - Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. -- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. - This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) -- The `WithSpan` method of the `Tracer` interface. - The functionality this method provided was limited compared to what a user can provide themselves. - It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) -- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. 
- These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) -- The `oterror` package. (#1026) -- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) - -### Fixed - -- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) -- Correct instrumentation version tag in Jaeger exporter. (#1037) -- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) -- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) -- The `otel-collector` example referenced outdated collector processors. (#1006) - -## [0.10.0] - 2020-07-29 - -This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. - -### Added - -- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. - These function build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) -- Add propagator option for gRPC instrumentation. (#986) -- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) - -### Changed - -- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. - This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) -- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. - This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) -- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) -- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) - - `value.Bool` was replaced with `kv.BoolValue`. - - `value.Int64` was replaced with `kv.Int64Value`. - - `value.Uint64` was replaced with `kv.Uint64Value`. - - `value.Float64` was replaced with `kv.Float64Value`. - - `value.Int32` was replaced with `kv.Int32Value`. - - `value.Uint32` was replaced with `kv.Uint32Value`. - - `value.Float32` was replaced with `kv.Float32Value`. - - `value.String` was replaced with `kv.StringValue`. - - `value.Int` was replaced with `kv.IntValue`. - - `value.Uint` was replaced with `kv.UintValue`. - - `value.Array` was replaced with `kv.ArrayValue`. -- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) -- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter`are also implemented by the wrapped `ResponseWriter`. (#979) -- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) -- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. 
(#985) -- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) - -### Removed - -- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) - -### Fixed - -- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) -- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) -- Use `global.Handle` for span export errors in the OTLP exporter. (#946) -- Correct Go language formatting in the README documentation. (#961) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) -- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) - -## [0.9.0] - 2020-07-20 - -### Added - -- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) -- A Detector to automatically detect resources from an environment variable. (#939) -- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) -- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. - References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) - -### Changed - -- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) - -### Removed - -- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) - -## [0.8.0] - 2020-07-09 - -### Added - -- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. - A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) -- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) -- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) -- Add `peer.service` semantic attribute. (#898) -- Add database-specific semantic attributes. (#899) -- Add semantic convention for `faas.coldstart` and `container.id`. (#909) -- Add http content size semantic conventions. (#905) -- Include `http.request_content_length` in HTTP request basic attributes. (#905) -- Add semantic conventions for operating system process resource attribute keys. (#919) -- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) - -### Changed - -- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) -- Use lowercase header names for B3 Multiple Headers. (#881) -- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. - This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. - If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) -- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. - Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. 
- This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) -- Extend semantic conventions for RPC. (#900) -- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) - - `"api/standard".FaaSName` -> `FaaSNameKey` - - `"api/standard".FaaSID` -> `FaaSIDKey` - - `"api/standard".FaaSVersion` -> `FaaSVersionKey` - - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` - -### Removed - -- The `FlagsUnused` trace flag is removed. - The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) -- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. - If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) - -### Fixed - -- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) -- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) -- The B3 propagator now propagates the debug flag. - This removes the behavior of changing the debug flag into a set sampling bit. - Instead, this now follow the B3 specification and omits the `X-B3-Sampling` header. (#882) -- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) -- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) -- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) -- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) -- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) -- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) -- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) -- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) -- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) -- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) -- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) -- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. - This is in accordance with OpenTelemetry semantic conventions. (#922) -- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) -- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) -- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) -- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) - -## [0.7.0] - 2020-06-26 - -This release implements the v0.5.0 version of the OpenTelemetry specification. - -### Added - -- The othttp instrumentation now includes default metrics. 
(#861) -- This CHANGELOG file to track all changes in the project going forward. -- Support for array type attributes. (#798) -- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844) -- Timestamps are now passed to exporters for each export. (#835) -- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. - This replaces the prior `Record` `struct` use for this purpose. (#835) -- New dependabot integration to automate package upgrades. (#814) -- `Meter` and `Tracer` implementations accept instrumentation version version as an optional argument. - This instrumentation version is passed on to exporters. (#811) (#805) (#802) -- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) -- Environment variables for Jaeger exporter are supported. (#796) -- New `aggregation.Kind` in the export metric API. (#808) -- New example that uses OTLP and the collector. (#790) -- Handle errors in the span `SetName` during span initialization. (#791) -- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) -- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) -- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to an user defined `Handler`. - There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`(#778) -- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) -- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) -- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 - -### Changed - -- Rename `Integrator` to `Processor` in the metric SDK. (#863) -- Rename `AggregationSelector` to `AggregatorSelector`. (#859) -- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) -- Rename `simple` integrator to `basic` integrator. (#857) -- Merge otlp collector examples. (#841) -- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. - With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) -- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) -- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. - All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. - Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) -- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) -- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) -- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 -- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. 
(#806) -- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) -- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) -- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) - -### Removed - -- `Uint64NumberKind` and related functions from the API. (#864) -- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) -- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) - -### Fixed - -- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) -- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) -- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) -- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) -- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) -- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) -- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) -- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) -- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) -- Set span status from HTTP status code in the othttp instrumentation. (#832) -- Fixed typo in push controller comment. (#834) -- The `Aggregator` testing has been updated and cleaned. (#812) -- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) -- Fixed `global` `handler_test.go` test failure. #804 -- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) -- Fixed OTLP example's accidental early close of exporter. (#807) -- Ensure zipkin exporter reads and closes response body. (#788) -- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) -- Clean up tools and RELEASING documentation. (#762) - -## [0.6.0] - 2020-05-21 - -### Added - -- Support for `Resource`s in the prometheus exporter. (#757) -- New pull controller. (#751) -- New `UpDownSumObserver` instrument. (#750) -- OpenTelemetry collector demo. (#711) -- New `SumObserver` instrument. (#747) -- New `UpDownCounter` instrument. (#745) -- New timeout `Option` and configuration function `WithTimeout` to the push controller. 
(#742) -- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) - -### Changed - -- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) -- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) -- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) -- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) -- The prometheus exporter now uses the new pull controller. (#751) -- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) -- Support use of synchronous instruments in asynchronous callbacks (#725) -- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) -- Rename `Observer` instrument to `ValueObserver`. (#734) -- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) -- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) -- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) - -### Fixed - -- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) -- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) -- Fix `string` case in `kv` `Infer` function. (#746) -- Fix panic in grpctrace client interceptors. (#740) -- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) -- Rewrite span batch process queue batching logic. (#719) -- Remove the push controller named Meter map. (#738) -- Fix Histogram aggregator initial state (fix #735). (#736) -- Ensure golang alpine image is running `golang-1.14` for examples. (#733) -- Added test for grpctrace `UnaryInterceptorClient`. (#695) -- Rearrange `api/metric` code layout. (#724) - -## [0.5.0] - 2020-05-13 - -### Added - -- Batch `Observer` callback support. (#717) -- Alias `api` types to root package of project. (#696) -- Create basic `othttp.Transport` for simple client instrumentation. (#678) -- `SetAttribute(string, interface{})` to the trace API. (#674) -- Jaeger exporter option that allows user to specify custom http client. (#671) -- `Stringer` and `Infer` methods to `key`s. (#662) - -### Changed - -- Rename `NewKey` in the `kv` package to just `Key`. (#721) -- Move `core` and `key` to `kv` package. (#720) -- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) -- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) -- Move `Number` from `core` to `api/metric` package. (#706) -- Move `SpanContext` from `core` to `trace` package. (#692) -- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) - -### Fixed - -- Update tooling to run generators in all submodules. (#705) -- gRPC interceptor regexp to match methods without a service name. (#683) -- Use a `const` for padding 64-bit B3 trace IDs. (#701) -- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) -- Left-pad 64-bit B3 trace IDs with zero. 
(#698) -- Propagate at least the first W3C tracestate header. (#694) -- Remove internal `StateLocker` implementation. (#688) -- Increase instance size CI system uses. (#690) -- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) -- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) -- Reimplement histogram using mutex instead of `StateLocker`. (#669) -- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) -- Update documentation to not include any references to `WithKeys`. (#672) -- Correct misspelling. (#668) -- Fix clobbering of the span context if extraction fails. (#656) -- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) - -## [0.4.3] - 2020-04-24 - -### Added - -- `Dockerfile` and `docker-compose.yml` to run example code. (#635) -- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) -- New `api/label` package, providing common label set implementation. (#651) -- Support for JSON marshaling of `Resources`. (#654) -- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) -- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) -- `WithSpanFormatter` option to the othttp plugin. (#617) -- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) -- The prometheus exporter now supports exporting histograms. (#601) -- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) -- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) -- An `Equal` method to the `Resource` test the equivalence of resources. (#613) -- An iterable structure (`AttributeIterator`) for `Resource` attributes. - -### Changed - -- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed values is provided. (#644) -- Pass `Resources` through the metrics export pipeline. (#659) - -### Removed - -- `WithKeys` option from the metric API. (#639) - -### Fixed - -- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) -- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) -- Use type names for return values in jaeger exporter. (#648) -- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) -- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) -- Do not cache `reflect.ValueOf()` in metric Labels. (#649) -- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) -- Add error wrapping to the prometheus exporter. (#631) -- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) -- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) -- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) -- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) -- Ensure spans created by httptrace client tracer reflect operation structure. (#618) -- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 -- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. 
(#611) - -## [0.4.2] - 2020-03-31 - -### Fixed - -- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) -- Fix time conversion from internal to OTLP in OTLP exporter. (#606) - -## [0.4.1] - 2020-03-31 - -### Fixed - -- Update `tag.sh` to create signed tags. (#604) - -## [0.4.0] - 2020-03-30 - -### Added - -- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) -- Script to verify examples after a new release. (#579) - -### Removed - -- The dogstatsd exporter due to lack of support. - This additionally removes support for statsd. (#591) -- `LabelSet` from the metric API. - This is replaced by a `[]core.KeyValue` slice. (#595) -- `Labels` from the metric API's `Meter` interface. (#595) - -### Changed - -- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) -- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) -- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) - -### Fixed - -- Corrected missing return in mock span. (#582) -- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) -- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) -- Update pre-release script to be compatible between GNU and BSD based systems. (#592) -- Add a `RecordBatch` benchmark. (#594) -- Moved span transforms of the OTLP exporter to the internal package. (#593) -- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) -- Removed unneeded allocation on empty labels in OLTP exporter. (#597) -- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) -- Update project documentation godoc.org links to pkg.go.dev. (#602) - -## [0.3.0] - 2020-03-21 - -This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. -There is still a possibility of breaking changes. - -### Added - -- Add `Observer` metric instrument. (#474) -- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) -- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) -- The zipkin trace exporter. (#495) -- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) -- Add `StatusMessage` field to the trace `Span`. (#524) -- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) -- The `Resource` type was added to the SDK. (#528) -- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) -- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. - Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) -- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) -- Scripts to better automate the release process. (#576) - -### Changed - -- Default to to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) -- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. 
(#511) -- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) -- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) -- Updated the trace `Sampler` interface conform to the OpenTelemetry specification. (#531) -- Rename metric API `Options` to `Config`. (#541) -- Rename metric `Counter` aggregator to be `Sum`. (#541) -- Unify metric options into `Option` from instrument specific options. (#541) -- The trace API's `TraceProvider` now support `Resource`s. (#545) -- Correct error in zipkin module name. (#548) -- The jaeger trace exporter now supports `Resource`s. (#551) -- Metric SDK now supports `Resource`s. - The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) -- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) -- The stdout trace exporter now supports `Resource`s. (#558) -- The metric `Descriptor` is now included at the API instead of the SDK. (#560) -- Replace `Ordered` with an iterator in `export.Labels`. (#567) - -### Removed - -- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) -- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) -- `GetDescriptor` from the metric SDK. (#575) -- The `Gauge` instrument from the metric API. (#537) - -### Fixed - -- Make histogram aggregator checkpoint consistent. (#438) -- Update README with import instructions and how to build and test. (#505) -- The default label encoding was updated to be unique. (#508) -- Use `NewRoot` in the othttp plugin for public endpoints. (#513) -- Fix data race in `BatchedSpanProcessor`. (#518) -- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 -- Use a variable-size array to represent ordered labels in maps. (#523) -- Update the OTLP protobuf and update changed import path. (#532) -- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) -- Eliminate goroutine leak in histogram stress test. (#547) -- Update OTLP exporter with latest protobuf. (#550) -- Add filters to the othttp plugin. (#556) -- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) -- Encode labels once during checkpoint. - The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. - This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) -- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) - -## [0.2.3] - 2020-03-04 - -### Added - -- `RecordError` method on `Span`s in the trace API to Simplify adding error events to spans. (#473) -- Configurable push frequency for exporters setup pipeline. (#504) - -### Changed - -- Rename the `exporter` directory to `exporters`. - The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. - This resulted in all subsequent releases not becoming the default latest. - A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. - Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. 
- Consequentially, this action also renames *all* exporter packages. (#502) - -### Removed - -- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) - -## [0.2.2] - 2020-02-27 - -### Added - -- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) -- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) -- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) -- `Config` and configuring `Option` to the propagator API. (#467) -- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) -- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) -- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) -- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) -- Histogram aggregator. (#433) -- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) -- `AlwaysParentSample` sampler to the trace API. (#455) -- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) - -### Changed - -- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) -- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) -- Move correlation context propagation to correlation package. (#479) -- Do not default to putting remote span context into links. (#480) -- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) -- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) -- Renamed the `export` package to `metric` to match directory structure. (#432) -- Rename the `api/distributedcontext` package to `api/correlation`. (#444) -- Rename the `api/propagators` package to `api/propagation`. (#444) -- Move the propagators from the `propagators` package into the `trace` API package. (#444) -- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) -- Moved all dependencies of tools package to a tools directory. (#466) - -### Removed - -- Binary propagators. (#467) -- NOOP propagator. (#467) - -### Fixed - -- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) -- Fix a possible nil-dereference crash (#478) -- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) -- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) -- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) -- Initialize `onError` based on `Config` in prometheus exporter. (#486) -- Correct module name in prometheus exporter README. (#475) -- Removed tracer name prefix from span names. (#430) -- Fix `aggregator_test.go` import package comment. (#431) -- Improved detail in stdout exporter. (#436) -- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. 
(#442) -- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) -- Reword function documentation in gRPC plugin. (#446) -- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) -- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) -- Upgraded to Go 1.13 in CI. (#465) -- Correct opentelemetry.io URL in trace SDK documentation. (#464) -- Refactored reference counting logic in SDK determination of stale records. (#468) -- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) - -## [0.2.1.1] - 2020-01-13 - -### Fixed - -- Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428) - -## [0.2.1] - 2020-01-08 - -### Added - -- Global meter forwarding implementation. - This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) -- Global trace forwarding implementation. - This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) -- Standardize export pipeline creation in all exporters. (#395) -- A testing, organization, and comments for 64-bit field alignment. (#418) -- Script to tag all modules in the project. (#414) - -### Changed - -- Renamed `propagation` package to `propagators`. (#362) -- Renamed `B3Propagator` propagator to `B3`. (#362) -- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) -- Renamed `BinaryPropagator` propagator to `Binary`. (#362) -- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) -- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) -- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) -- Renamed `SpanOption` to `StartOption` in the trace API. (#369) -- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) -- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) -- `Number` now has a pointer receiver for its methods. (#375) -- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) -- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) -- Renamed `Message` in Event to `Name` in the trace API. (#389) -- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) -- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) -- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) -- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) -- Renamed the `File` option in the stdout exporter to `Writer`. (#404) -- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. - -### Fixed - -- Aggregator import path corrected. (#421) -- Correct links in README. 
(#368) -- The README was updated to match latest code changes in its examples. (#374) -- Don't capitalize error statements. (#375) -- Fix ignored errors. (#375) -- Fix ambiguous variable naming. (#375) -- Removed unnecessary type casting. (#375) -- Use named parameters. (#375) -- Updated release schedule. (#378) -- Correct http-stackdriver example module name. (#394) -- Removed the `http.request` span in `httptrace` package. (#397) -- Add comments in the metrics SDK (#399) -- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into a empty one. (#402) (#403) -- Add documentation of compatible exporters in the README. (#405) -- Typo fix. (#408) -- Simplify span check logic in SDK tracer implementation. (#419) - -## [0.2.0] - 2019-12-03 - -### Added - -- Unary gRPC tracing example. (#351) -- Prometheus exporter. (#334) -- Dogstatsd metrics exporter. (#326) - -### Changed - -- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) -- Rename `GetMeter` to `Meter`. (#357) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Move `/global` package to `/api/global`. (#356) -- Rename `GetTracer` to `Tracer`. (#347) - -### Removed - -- `SetAttribute` from the `Span` interface in the trace API. (#361) -- `AddLink` from the `Span` interface in the trace API. (#349) -- `Link` from the `Span` interface in the trace API. (#349) - -### Fixed - -- Exclude example directories from coverage report. (#365) -- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) -- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this is environment variable is not needed for that version of Go. (#359) -- Run the race checker for all test. (#354) -- Redundant commands in the Makefile are removed. (#354) -- Split the `generate` and `lint` targets of the Makefile. (#354) -- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) -- Add example Prometheus binary to gitignore. (#358) -- Support negative numbers with the `MaxSumCount`. (#335) -- Resolve race conditions in `push_test.go` identified in #339. (#340) -- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) -- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. - Previously it was testing `AlwaysSample` twice. (#325) -- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) -- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325) -- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. - This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. - This was corrected. (#333) - -## [0.1.2] - 2019-11-18 - -### Fixed - -- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) -- Removed unnecessary unslicing of parameters that are already a slice. (#324) - -## [0.1.1] - 2019-11-18 - -This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. - -### Added - -- Metrics stdout export pipeline. (#265) -- Array aggregation for raw measure metrics. 
(#282) -- The core.Value now have a `MarshalJSON` method. (#281) - -### Removed - -- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) -- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) - -### Changed - -- Allocation in LabelSet construction to reduce GC overhead. (#318) -- `trace.WithAttributes` to append values instead of replacing (#315) -- Use a formula for tolerance in sampling tests. (#298) -- Move export types into trace and metric-specific sub-directories. (#289) -- `SpanKind` back to being based on an `int` type. (#288) - -### Fixed - -- URL to OpenTelemetry website in README. (#323) -- Name of othttp default tracer. (#321) -- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) -- CI modules cache to correctly restore/save from/to the cache. (#316) -- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) -- README now reflects the new code structure introduced with these changes. (#291) -- Make the basic example work. (#279) - -## [0.1.0] - 2019-11-04 - -This is the first release of open-telemetry go library. -It contains api and sdk for trace and meter. - -### Added - -- Initial OpenTelemetry trace and metric API prototypes. -- Initial OpenTelemetry trace, metric, and export SDK packages. -- A wireframe bridge to support compatibility with OpenTracing. -- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. -- Exporters for Jaeger, Stackdriver, and stdout. -- Propagators for binary, B3, and trace-context protocols. -- Project information and guidelines in the form of a README and CONTRIBUTING. -- Tools to build the project and a Makefile to automate the process. -- Apache-2.0 license. -- CircleCI build CI manifest files. -- CODEOWNERS file to track owners of this project. 
- -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD -[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 -[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 -[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 -[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 -[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 -[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 -[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 -[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 -[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 -[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 -[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 -[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 -[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 -[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 -[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 -[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 -[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 -[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 -[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 -[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 -[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 -[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 -[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 -[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 -[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 -[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 -[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 -[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 -[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 -[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 -[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 -[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 -[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 -[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 -[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 -[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 -[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 -[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 -[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 -[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 -[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 diff --git 
a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS deleted file mode 100644 index 808755fe20..0000000000 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -##################################################### -# -# List of approvers for this repository -# -##################################################### -# -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md -# -# -# Learn about CODEOWNERS file format: -# https://help.github.com/en/articles/about-code-owners -# - -* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared - -CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md deleted file mode 100644 index 7dead7084d..0000000000 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ /dev/null @@ -1,521 +0,0 @@ -# Contributing to opentelemetry-go - -The Go special interest group (SIG) meets regularly. See the -OpenTelemetry -[community](https://github.com/open-telemetry/community#golang-sdk) -repo for information on this and other language SIGs. - -See the [public meeting -notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) -for a summary description of past meetings. To request edit access, -join the meeting or get in touch on -[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). - -## Development - -You can view and edit the source code by cloning this repository: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go.git -``` - -Run `make test` to run the tests instead of `go test`. - -There are some generated files checked into the repo. To make sure -that the generated files are up-to-date, run `make` (or `make -precommit` - the `precommit` target is the default). - -The `precommit` target also fixes the formatting of the code and -checks the status of the go module files. - -If after running `make precommit` the output of `git status` contains -`nothing to commit, working tree clean` then it means that everything -is up-to-date and properly formatted. - -## Pull Requests - -### How to Send Pull Requests - -Everyone is welcome to contribute code to `opentelemetry-go` via -GitHub pull requests (PRs). - -To create a new PR, fork the project in GitHub and clone the upstream -repo: - -```sh -go get -d go.opentelemetry.io/otel -``` - -(This may print some warning about "build constraints exclude all Go -files", just ignore it.) - -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go -``` - -(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - -that name is a kind of a redirector to GitHub that `go get` can -understand, but `git` does not.) - -This would put the project in the `opentelemetry-go` directory in -current working directory. 
- -Enter the newly created directory and add your fork as a new remote: - -```sh -git remote add git@github.com:/opentelemetry-go -``` - -Check out a new branch, make modifications, run linters and tests, update -`CHANGELOG.md`, and push the branch to your fork: - -```sh -git checkout -b -# edit files -# update changelog -make precommit -git add -p -git commit -git push -``` - -Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull -request ID to the entry you added to `CHANGELOG.md`. - -### How to Receive Comments - -* If the PR is not ready for review, please put `[WIP]` in the title, - tag it as `work-in-progress`, or mark it as - [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure CLA is signed and CI is clear. - -### How to Get PRs Merged - -A PR is considered to be **ready to merge** when: - -* It has received two approvals from Collaborators/Maintainers (at - different companies). This is not enforced through technical means - and a PR may be **ready to merge** with a single approval if the change - and its approach have been discussed and consensus reached. -* Feedback has been addressed. -* Any substantive changes to your PR will require that you clear any prior - Approval reviews, this includes changes resulting from other feedback. Unless - the approver explicitly stated that their approval will persist across - changes it should be assumed that the PR needs their review again. Other - project members (e.g. approvers, maintainers) can help with this if there are - any questions or if you forget to clear reviews. -* It has been open for review for at least one working day. This gives - people reasonable time to review. -* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for - one day and may be merged with a single Maintainer's approval. -* `CHANGELOG.md` has been updated to reflect what has been - added, changed, removed, or fixed. -* `README.md` has been updated if necessary. -* Urgent fix can take exception as long as it has been actively - communicated. - -Any Maintainer can merge the PR once it is **ready to merge**. - -## Design Choices - -As with other OpenTelemetry clients, opentelemetry-go follows the -[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). - -It's especially valuable to read through the [library -guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). - -### Focus on Capabilities, Not Structure Compliance - -OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are -not. - -As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is -flexible. - -It is preferable to have contributions follow the idioms of the -language rather than conform to specific API names or argument -patterns in the spec. - -For a deeper discussion, see -[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). - -## Documentation - -Each non-example Go Module should have its own `README.md` containing: - -- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). -- Brief description. -- Installation instructions (and requirements if applicable). -- Hyperlink to an example. 
Depending on the component the example can be: - - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). - - A sample Go application with its own `README.md`, like [here](example/zipkin). -- Additional documentation sections such us: - - Configuration, - - Contributing, - - References. - -[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. - -Moreover, it should be possible to navigate to any `README.md` from the -root `README.md`. - -## Style Guide - -One of the primary goals of this project is that it is actually used by -developers. With this goal in mind the project strives to build -user-friendly and idiomatic Go code adhering to the Go community's best -practices. - -For a non-comprehensive but foundational overview of these best practices -the [Effective Go](https://golang.org/doc/effective_go.html) documentation -is an excellent starting place. - -As a convenience for developers building this project the `make precommit` -will format, lint, validate, and in some cases fix the changes you plan to -submit. This check will need to pass for your changes to be able to be -merged. - -In addition to idiomatic Go, the project has adopted certain standards for -implementations of common patterns. These standards should be followed as a -default, and if they are not followed documentation needs to be included as -to the reasons why. - -### Configuration - -When creating an instantiation function for a complex `type T struct`, it is -useful to allow variable number of options to be applied. However, the strong -type system of Go restricts the function design options. There are a few ways -to solve this problem, but we have landed on the following design. - -#### `config` - -Configuration should be held in a `struct` named `config`, or prefixed with -specific type name this Configuration applies to if there are multiple -`config` in the package. This type must contain configuration options. - -```go -// config contains configuration options for a thing. -type config struct { - // options ... -} -``` - -In general the `config` type will not need to be used externally to the -package and should be unexported. If, however, it is expected that the user -will likely want to build custom options for the configuration, the `config` -should be exported. Please, include in the documentation for the `config` -how the user can extend the configuration. - -It is important that internal `config` are not shared across package boundaries. -Meaning a `config` from one package should not be directly used by another. The -one exception is the API packages. The configs from the base API, eg. -`go.opentelemetry.io/otel/trace.TracerConfig` and -`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed -by the SDK therefor it is expected that these are exported. - -When a config is exported we want to maintain forward and backward -compatibility, to achieve this no fields should be exported but should -instead be accessed by methods. - -Optionally, it is common to include a `newConfig` function (with the same -naming scheme). This function wraps any defaults setting and looping over -all options to create a configured `config`. - -```go -// newConfig returns an appropriately configured config. -func newConfig(options ...Option) config { - // Set default values for config. - config := config{/* […] */} - for _, option := range options { - config = option.apply(config) - } - // Preform any validation here. 
- return config -} -``` - -If validation of the `config` options is also preformed this can return an -error as well that is expected to be handled by the instantiation function -or propagated to the user. - -Given the design goal of not having the user need to work with the `config`, -the `newConfig` function should also be unexported. - -#### `Option` - -To set the value of the options a `config` contains, a corresponding -`Option` interface type should be used. - -```go -type Option interface { - apply(config) config -} -``` - -Having `apply` unexported makes sure that it will not be used externally. -Moreover, the interface becomes sealed so the user cannot easily implement -the interface on its own. - -The `apply` method should return a modified version of the passed config. -This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. - -The name of the interface should be prefixed in the same way the -corresponding `config` is (if at all). - -#### Options - -All user configurable options for a `config` must have a related unexported -implementation of the `Option` interface and an exported configuration -function that wraps this implementation. - -The wrapping function name should be prefixed with `With*` (or in the -special case of a boolean options `Without*`) and should have the following -function signature. - -```go -func With*(…) Option { … } -``` - -##### `bool` Options - -```go -type defaultFalseOption bool - -func (o defaultFalseOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithOption sets a T to have an option included. -func WithOption() Option { - return defaultFalseOption(true) -} -``` - -```go -type defaultTrueOption bool - -func (o defaultTrueOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithoutOption sets a T to have Bool option excluded. -func WithoutOption() Option { - return defaultTrueOption(false) -} -``` - -##### Declared Type Options - -```go -type myTypeOption struct { - MyType MyType -} - -func (o myTypeOption) apply(c config) config { - c.MyType = o.MyType - return c -} - -// WithMyType sets T to have include MyType. -func WithMyType(t MyType) Option { - return myTypeOption{t} -} -``` - -##### Functional Options - -```go -type optionFunc func(config) config - -func (fn optionFunc) apply(c config) config { - return fn(c) -} - -// WithMyType sets t as MyType. -func WithMyType(t MyType) Option { - return optionFunc(func(c config) config { - c.MyType = t - return c - }) -} -``` - -#### Instantiation - -Using this configuration pattern to configure instantiation with a `NewT` -function. - -```go -func NewT(options ...Option) T {…} -``` - -Any required parameters can be declared before the variadic `options`. - -#### Dealing with Overlap - -Sometimes there are multiple complex `struct` that share common -configuration and also have distinct configuration. To avoid repeated -portions of `config`s, a common `config` can be used with the union of -options being handled with the `Option` interface. - -For example. - -```go -// config holds options for all animals. -type config struct { - Weight float64 - Color string - MaxAltitude float64 -} - -// DogOption apply Dog specific options. -type DogOption interface { - applyDog(config) config -} - -// BirdOption apply Bird specific options. -type BirdOption interface { - applyBird(config) config -} - -// Option apply options for all animals. 
-type Option interface { - BirdOption - DogOption -} - -type weightOption float64 - -func (o weightOption) applyDog(c config) config { - c.Weight = float64(o) - return c -} - -func (o weightOption) applyBird(c config) config { - c.Weight = float64(o) - return c -} - -func WithWeight(w float64) Option { return weightOption(w) } - -type furColorOption string - -func (o furColorOption) applyDog(c config) config { - c.Color = string(o) - return c -} - -func WithFurColor(c string) DogOption { return furColorOption(c) } - -type maxAltitudeOption float64 - -func (o maxAltitudeOption) applyBird(c config) config { - c.MaxAltitude = float64(o) - return c -} - -func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } - -func NewDog(name string, o ...DogOption) Dog {…} -func NewBird(name string, o ...BirdOption) Bird {…} -``` - -### Interfaces - -To allow other developers to better comprehend the code, it is important -to ensure it is sufficiently documented. One simple measure that contributes -to this aim is self-documenting by naming method parameters. Therefore, -where appropriate, methods of every exported interface type should have -their parameters appropriately named. - -#### Interface Stability - -All exported stable interfaces that include the following warning in their -doumentation are allowed to be extended with additional methods. - -> Warning: methods may be added to this interface in minor releases. - -Otherwise, stable interfaces MUST NOT be modified. - -If new functionality is needed for an interface that cannot be changed it MUST -be added by including an additional interface. That added interface can be a -simple interface for the specific functionality that you want to add or it can -be a super-set of the original interface. For example, if you wanted to a -`Close` method to the `Exporter` interface: - -```go -type Exporter interface { - Export() -} -``` - -A new interface, `Closer`, can be added: - -```go -type Closer interface { - Close() -} -``` - -Code that is passed the `Exporter` interface can now check to see if the passed -value also satisfies the new interface. E.g. - -```go -func caller(e Exporter) { - /* ... */ - if c, ok := e.(Closer); ok { - c.Close() - } - /* ... */ -} -``` - -Alternatively, a new type that is the super-set of an `Exporter` can be created. - -```go -type ClosingExporter struct { - Exporter - Close() -} -``` - -This new type can be used similar to the simple interface above in that a -passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type -and the `Close` method called. - -This super-set approach can be useful if there is explicit behavior that needs -to be coupled with the original type and passed as a unified type to a new -function, but, because of this coupling, it also limits the applicability of -the added functionality. If there exist other interfaces where this -functionality should be added, each one will need their own super-set -interfaces and will duplicate the pattern. For this reason, the simple targeted -interface that defines the specific functionality should be preferred. 
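
> Editorial sketch (not part of the patched file): the deleted contributing guide above shows the `config` struct, the sealed `Option` interface, and the exported `With*` constructors as separate fragments. The following is a minimal, self-contained sketch of how those pieces fit together for a hypothetical `Thing` type. Every identifier here (`Thing`, `WithTimeout`, `WithVerbose`) is an illustrative assumption, not part of the opentelemetry-go API.

```go
package thing

import "time"

// config holds options for a hypothetical Thing. Field names are
// illustrative only; they are not taken from opentelemetry-go.
type config struct {
	Timeout time.Duration
	Verbose bool
}

// newConfig applies defaults first, then each Option in order,
// mirroring the newConfig pattern described in the guide.
func newConfig(options ...Option) config {
	c := config{Timeout: 10 * time.Second} // assumed default
	for _, o := range options {
		c = o.apply(c)
	}
	return c
}

// Option is sealed: apply is unexported, so only this package can
// implement it, as the guide recommends.
type Option interface {
	apply(config) config
}

// optionFunc adapts a plain function into an Option (the
// "functional options" variant shown above).
type optionFunc func(config) config

func (fn optionFunc) apply(c config) config { return fn(c) }

// WithTimeout sets how long a Thing waits before giving up.
func WithTimeout(d time.Duration) Option {
	return optionFunc(func(c config) config {
		c.Timeout = d
		return c
	})
}

// verboseOption is the declared-type variant for a bool option.
type verboseOption bool

func (o verboseOption) apply(c config) config {
	c.Verbose = bool(o)
	return c
}

// WithVerbose enables verbose behavior (default is off).
func WithVerbose() Option { return verboseOption(true) }

// Thing is the type being configured.
type Thing struct {
	timeout time.Duration
	verbose bool
}

// NewThing builds a Thing from the variadic options, keeping the
// config entirely internal to the package.
func NewThing(options ...Option) Thing {
	c := newConfig(options...)
	return Thing{timeout: c.Timeout, verbose: c.Verbose}
}
```

Usage under the same assumptions: `t := NewThing(WithTimeout(5*time.Second), WithVerbose())`. Because `apply` returns a modified copy rather than mutating through a pointer, the config stays on the stack and options compose in call order, which is the design rationale the guide gives for this pattern.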
- -## Approvers and Maintainers - -Approvers: - -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [David Ashpole](https://github.com/dashpole), Google -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Robert Pająk](https://github.com/pellared), Splunk - -Maintainers: - -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Tyler Yahn](https://github.com/MrAlias), Splunk - -### Become an Approver or a Maintainer - -See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile deleted file mode 100644 index b085561dba..0000000000 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TOOLS_MOD_DIR := ./internal/tools - -ALL_DOCS := $(shell find . -name '*.md' -type f | sort) -ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) -OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) - -GO = go -TIMEOUT = 60 - -.DEFAULT_GOAL := precommit - -.PHONY: precommit ci -precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage - -# Tools - -TOOLS = $(CURDIR)/.tools - -$(TOOLS): - @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) - cd $(TOOLS_MOD_DIR) && \ - $(GO) build -o $@ $(PACKAGE) - -MULTIMOD = $(TOOLS)/multimod -$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod - -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - -CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink - -GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint - -MISSPELL = $(TOOLS)/misspell -$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell - -GOCOVMERGE = $(TOOLS)/gocovmerge -$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge - -STRINGER = $(TOOLS)/stringer -$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer - -PORTO = $(TOOLS)/porto -$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto - -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - -.PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) - -# Build - -.PHONY: generate build - -generate: $(OTEL_GO_MOD_DIRS:%=generate/%) -generate/%: DIR=$* -generate/%: | $(STRINGER) $(PORTO) - @echo "$(GO) generate $(DIR)/..." \ - && cd $(DIR) \ - && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . - -build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) -build/%: DIR=$* -build/%: - @echo "$(GO) build $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) build ./... - -build-tests/%: DIR=$* -build-tests/%: - @echo "$(GO) build tests $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null - -# Tests - -TEST_TARGETS := test-default test-bench test-short test-verbose test-race -.PHONY: $(TEST_TARGETS) test -test-default test-race: ARGS=-race -test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. -test-short: ARGS=-short -test-verbose: ARGS=-v -race -$(TEST_TARGETS): test -test: $(OTEL_GO_MOD_DIRS:%=test/%) -test/%: DIR=$* -test/%: - @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) - -COVERAGE_MODE = atomic -COVERAGE_PROFILE = coverage.out -.PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) - @set -e; \ - printf "" > coverage.txt; \ - for dir in $(ALL_COVERAGE_MOD_DIRS); do \ - echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ - (cd "$${dir}" && \ - $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ - $(GO) tool cover -html=coverage.out -o coverage.html); \ - done; \ - $(GOCOVMERGE) $$(find . 
-name coverage.out) > coverage.txt - -.PHONY: golangci-lint golangci-lint-fix -golangci-lint-fix: ARGS=--fix -golangci-lint-fix: golangci-lint -golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) -golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) - @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ - && cd $(DIR) \ - && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) - -.PHONY: crosslink -crosslink: | $(CROSSLINK) - @echo "cross-linking all go modules" \ - && $(CROSSLINK) - -.PHONY: go-mod-tidy -go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) -go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink - @echo "$(GO) mod tidy in $(DIR)" \ - && cd $(DIR) \ - && $(GO) mod tidy - -.PHONY: lint-modules -lint-modules: go-mod-tidy - -.PHONY: lint -lint: misspell lint-modules golangci-lint - -.PHONY: vanity-import-check -vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . - -.PHONY: misspell -misspell: | $(MISSPELL) - @$(MISSPELL) -w $(ALL_DOCS) - -.PHONY: license-check -license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ - awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -DEPENDABOT_PATH=./.github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: - @result=$$( \ - for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ - do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \ - || echo "$$f"; \ - done; \ - ); \ - if [ -n "$$result" ]; then \ - echo "missing dependabot entry:"; echo "$$result"; \ - echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \ - exit 1; \ - fi - -.PHONY: check-clean-work-tree -check-clean-work-tree: - @if ! git diff --quiet; then \ - echo; \ - echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ - echo; \ - git status; \ - exit 1; \ - fi - -.PHONY: prerelease -prerelease: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} - -COMMIT ?= "HEAD" -.PHONY: add-tags -add-tags: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md deleted file mode 100644 index 21c2a71612..0000000000 --- a/vendor/go.opentelemetry.io/otel/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# OpenTelemetry-Go - -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) -[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) -[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) - -OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). 
-It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. - -## Project Status - -| Signal | Status | Project | -| ------- | ---------- | ------- | -| Traces | Stable | N/A | -| Metrics | Alpha | N/A | -| Logs | Frozen [1] | N/A | - -- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. - No Logs Pull Requests are currently being accepted. - -Progress and status specific to this repository is tracked in our local -[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) -and -[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). - -Project versioning information and stability guarantees can be found in the -[versioning documentation](./VERSIONING.md). - -### Compatibility - -OpenTelemetry-Go attempts to track the current supported versions of the -[Go language](https://golang.org/doc/devel/release#policy). The release -schedule after a new minor version of go is as follows: - -- The first release or one month, which ever is sooner, will add build steps for the new go version. -- The first release after three months will remove support for the oldest go version. - -This project is tested on the following systems. - -| OS | Go Version | Architecture | -| ------- | ---------- | ------------ | -| Ubuntu | 1.17 | amd64 | -| Ubuntu | 1.16 | amd64 | -| Ubuntu | 1.17 | 386 | -| Ubuntu | 1.16 | 386 | -| MacOS | 1.17 | amd64 | -| MacOS | 1.16 | amd64 | -| Windows | 1.17 | amd64 | -| Windows | 1.16 | amd64 | -| Windows | 1.17 | 386 | -| Windows | 1.16 | 386 | - -While this project should work for other systems, no compatibility guarantees -are made for those systems currently. - -## Getting Started - -You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). - -OpenTelemetry's goal is to provide a single set of APIs to capture distributed -traces and metrics from your application and send them to an observability -platform. This project allows you to do just that for applications written in -Go. There are two steps to this process: instrument your application, and -configure an exporter. - -### Instrumentation - -To start capturing distributed traces and metric events from your application -it first needs to be instrumented. The easiest way to do this is by using an -instrumentation library for your code. Be sure to check out [the officially -supported instrumentation -libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). - -If you need to extend the telemetry an instrumentation library provides or want -to build your own instrumentation for your application directly you will need -to use the -[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. - -### Export - -Now that your application is instrumented to collect telemetry, it needs an -export pipeline to send that telemetry to an observability platform. - -All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
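For readers unfamiliar with the API referenced in the Instrumentation section above, a minimal sketch of what manual instrumentation with this vendored package looks like follows; the tracer name `"example/swarmd"` and the `doWork` function are illustrative assumptions, not part of the vendored code. Until an SDK `TracerProvider` is registered via `otel.SetTracerProvider`, the global tracer is a no-op, so instrumentation can safely be added before an exporter is configured.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

func doWork(ctx context.Context) {
	// Obtain a named tracer from the global provider.
	tracer := otel.Tracer("example/swarmd")

	// Start a span, annotate it, and end it when the work is done.
	ctx, span := tracer.Start(ctx, "do-work")
	defer span.End()

	span.SetAttributes(attribute.String("component", "example"))
	_ = ctx // pass ctx to downstream calls so child spans nest correctly
}

func main() {
	doWork(context.Background())
}
```

The table below lists which signals each officially supported exporter handles.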
- -| Exporter | Metrics | Traces | -| :-----------------------------------: | :-----: | :----: | -| [Jaeger](./exporters/jaeger/) | | ✓ | -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | - -Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). - -## Contributing - -See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md deleted file mode 100644 index e3bff66c6a..0000000000 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ /dev/null @@ -1,132 +0,0 @@ -# Release Process - -## Semantic Convention Generation - -If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new -semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility -installed by `make tools` that can be used to generate the a package with the name matching the specification -version number under the `semconv` package. This will ideally be done soon after the specification release is -tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the -generated files match the released semantic conventions. - -There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. - -``` -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2 -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 -``` - -Using default values for all options other than `input` will result in using the `template.j2` template to -generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`. - -There are several ancillary files that are not generated and should be copied into the new package from the -prior package, with updates made as appropriate to canonical import path statements and constant values. -These files include: - -* doc.go -* exception.go -* http(_test)?.go -* schema.go - -Uses of the previous schema version in this repository should be updated to use the newly generated version. -No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed` -pipeline if you like living on the edge. - -## Pre-Release - -First, decide which module sets will be released and update their versions -in `versions.yaml`. Commit this change to a new branch. - -Update go.mod for submodules to depend on the new release which will happen in the next step. - -1. Run the `prerelease` make target. It creates a branch - `prerelease__` that will contain all release changes. - - ``` - make prerelease MODSET= - ``` - -2. Verify the changes. - - ``` - git diff ...prerelease__ - ``` - - This should have changed the version for all modules to be ``. - If these changes look correct, merge them into your pre-release branch: - - ```go - git merge prerelease__ - ``` - -3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. - To verify this, you can look directly at the commits since the ``. 
- - ``` - git --no-pager log --pretty=oneline "..HEAD" - ``` - - - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). - - Update all the appropriate links at the bottom. - -4. Push the changes to upstream and create a Pull Request on GitHub. - Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. - -## Tag - -Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. - -***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! -Failure to do so will leave things in a broken state. As long as you do not -change `versions.yaml` between pre-release and this step, things should be fine. - -***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). -It is critical you make sure the version you push upstream is correct. -[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). - -1. For each module set that will be released, run the `add-tags` make target - using the `` of the commit on the main branch for the merged Pull Request. - - ``` - make add-tags MODSET= COMMIT= - ``` - - It should only be necessary to provide an explicit `COMMIT` value if the - current `HEAD` of your working directory is not the correct commit. - -2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). - Make sure you push all sub-modules as well. - - ``` - git push upstream - git push upstream - ... - ``` - -## Release - -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. - -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - -## Post-Release - -### Contrib Repository - -Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. - -### Website Documentation - -Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). -Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md deleted file mode 100644 index 412f1e362b..0000000000 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ /dev/null @@ -1,224 +0,0 @@ -# Versioning - -This document describes the versioning policy for this repository. This policy -is designed so the following goals can be achieved. - -**Users are provided a codebase of value that is stable and secure.** - -## Policy - -* Versioning of this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver - 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. 
- * New methods may be added to exported API interfaces. All exported - interfaces that fall within this exception will include the following - paragraph in their public documentation. - - > Warning: methods may be added to this interface in minor releases. - - * If a module is version `v2` or higher, the major version of the module - must be included as a `/vN` at the end of the module paths used in - `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require - go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path - (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the - paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a - `@v2.0.1` in that example. One way to think about it is that the module - name now includes the `/v2`, so include `/v2` whenever you are using the - module name). - * If a module is version `v0` or `v1`, do not include the major version in - either the module path or the import path. - * Modules will be used to encapsulate signals and components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API will be versioned - with a major version greater than `v0`. - * The decision to make a module stable will be made on a case-by-case - basis by the maintainers of this project. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * All stable modules that use the same major version number will use the - same entire version number. - * Stable modules may be released with an incremented minor or patch - version even though that module has not been changed, but rather so - that it will remain at the same version as other stable modules that - did undergo change. - * When an experimental module becomes stable a new stable module version - will be released and will include this now stable module. The new - stable module version will be an increment of the minor version number - and will be applied to all existing stable modules as well as the newly - stable module being released. -* Versioning of the associated [contrib - repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of - this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). - * If a module is version `v2` or higher, the - major version of the module must be included as a `/vN` at the end of the - module paths used in `go.mod` files (e.g., `module - go.opentelemetry.io/contrib/instrumentation/host/v2`, `require - go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the - package import path (e.g., `import - "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes - the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. 
Note there - is both a `/v2` and a `@v2.0.1` in that example. One way to think about - it is that the module name now includes the `/v2`, so include `/v2` - whenever you are using the module name). - * If a module is version `v0` or `v1`, do not include the major version - in either the module path or the import path. - * In addition to public APIs, telemetry produced by stable instrumentation - will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. - * Modules will be used to encapsulate instrumentation, detectors, exporters, - propagators, and any other independent sets of related components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API and telemetry will - be versioned with a major version greater than `v0`. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * Stable contrib modules cannot depend on experimental modules from this - project. - * All stable contrib modules of the same major version with this project - will use the same entire version as this project. - * Stable modules may be released with an incremented minor or patch - version even though that module's code has not been changed. Instead - the only change that will have been included is to have updated that - modules dependency on this project's stable APIs. - * When an experimental module in contrib becomes stable a new stable - module version will be released and will include this now stable - module. The new stable module version will be an increment of the minor - version number and will be applied to all existing stable contrib - modules, this project's modules, and the newly stable module being - released. - * Contrib modules will be kept up to date with this project's releases. - * Due to the dependency contrib modules will implicitly have on this - project's modules the release of stable contrib modules to match the - released version number will be staggered after this project's release. - There is no explicit time guarantee for how long after this projects - release the contrib release will be. Effort should be made to keep them - as close in time as possible. - * No additional stable release in this project can be made until the - contrib repository has a matching stable release. - * No release can be made in the contrib repository after this project's - stable release except for a stable release of the contrib repository. -* GitHub releases will be made for all releases. -* Go modules will be made available at Go package mirrors. - -## Example Versioning Lifecycle - -To better understand the implementation of the above policy the following -example is provided. 
This project is simplified to include only the following -modules and their versions: - -* `otel`: `v0.14.0` -* `otel/trace`: `v0.14.0` -* `otel/metric`: `v0.14.0` -* `otel/baggage`: `v0.14.0` -* `otel/sdk/trace`: `v0.14.0` -* `otel/sdk/metric`: `v0.14.0` - -These modules have been developed to a point where the `otel/trace`, -`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they -should be considered for a stable release. The `otel/metric` and -`otel/sdk/metric` are still under active development and the `otel` module -depends on both `otel/trace` and `otel/metric`. - -The `otel` package is refactored to remove its dependencies on `otel/metric` so -it can be released as stable as well. With that done the following release -candidates are made: - -* `otel`: `v1.0.0-RC1` -* `otel/trace`: `v1.0.0-RC1` -* `otel/baggage`: `v1.0.0-RC1` -* `otel/sdk/trace`: `v1.0.0-RC1` - -The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. - -A few minor issues are discovered in the `otel/trace` package. These issues are -resolved with some minor, but backwards incompatible, changes and are released -as a second release candidate: - -* `otel`: `v1.0.0-RC2` -* `otel/trace`: `v1.0.0-RC2` -* `otel/baggage`: `v1.0.0-RC2` -* `otel/sdk/trace`: `v1.0.0-RC2` - -Notice that all module version numbers are incremented to adhere to our -versioning policy. - -After these release candidates have been evaluated to satisfaction, they are -released as version `v1.0.0`. - -* `otel`: `v1.0.0` -* `otel/trace`: `v1.0.0` -* `otel/baggage`: `v1.0.0` -* `otel/sdk/trace`: `v1.0.0` - -Since both the `go` utility and the Go module system support [the semantic -versioning definition of -precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release -will correctly be interpreted as the successor to the previous release -candidates. - -Active development of this project continues. The `otel/metric` module now has -backwards incompatible changes to its API that need to be released and the -`otel/baggage` module has a minor bug fix that needs to be released. The -following release is made: - -* `otel`: `v1.0.1` -* `otel/trace`: `v1.0.1` -* `otel/metric`: `v0.15.0` -* `otel/baggage`: `v1.0.1` -* `otel/sdk/trace`: `v1.0.1` -* `otel/sdk/metric`: `v0.15.0` - -Notice that, again, all stable module versions are incremented in unison and -the `otel/sdk/metric` package, which depends on the `otel/metric` package, also -bumped its version. This bump of the `otel/sdk/metric` package makes sense -given their coupling, though it is not explicitly required by our versioning -policy. - -As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a -point where they should be evaluated for stability. The `otel` module is -reintegrated with the `otel/metric` package and the following release is made: - -* `otel`: `v1.1.0-RC1` -* `otel/trace`: `v1.1.0-RC1` -* `otel/metric`: `v1.1.0-RC1` -* `otel/baggage`: `v1.1.0-RC1` -* `otel/sdk/trace`: `v1.1.0-RC1` -* `otel/sdk/metric`: `v1.1.0-RC1` - -All the modules are evaluated and determined to a viable stable release. They -are then released as version `v1.1.0` (the minor version is incremented to -indicate the addition of new signal). 
- -* `otel`: `v1.1.0` -* `otel/trace`: `v1.1.0` -* `otel/metric`: `v1.1.0` -* `otel/baggage`: `v1.1.0` -* `otel/sdk/trace`: `v1.1.0` -* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go deleted file mode 100644 index dafe7424df..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package attribute provides key and value attributes. -package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go deleted file mode 100644 index 8b940f78dc..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "bytes" - "sync" - "sync/atomic" -) - -type ( - // Encoder is a mechanism for serializing a label set into a - // specific string representation that supports caching, to - // avoid repeated serialization. An example could be an - // exporter encoding the label set into a wire representation. - Encoder interface { - // Encode returns the serialized encoding of the label - // set using its Iterator. This result may be cached - // by a attribute.Set. - Encode(iterator Iterator) string - - // ID returns a value that is unique for each class of - // label encoder. Label encoders allocate these using - // `NewEncoderID`. - ID() EncoderID - } - - // EncoderID is used to identify distinct Encoder - // implementations, for caching encoded results. - EncoderID struct { - value uint64 - } - - // defaultLabelEncoder uses a sync.Pool of buffers to reduce - // the number of allocations used in encoding labels. This - // implementation encodes a comma-separated list of key=value, - // with '/'-escaping of '=', ',', and '\'. - defaultLabelEncoder struct { - // pool is a pool of labelset builders. The buffers in this - // pool grow to a size that most label encodings will not - // allocate new memory. - pool sync.Pool // *bytes.Buffer - } -) - -// escapeChar is used to ensure uniqueness of the label encoding where -// keys or values contain either '=' or ','. 
Since there is no parser -// needed for this encoding and its only requirement is to be unique, -// this choice is arbitrary. Users will see these in some exporters -// (e.g., stdout), so the backslash ('\') is used as a conventional choice. -const escapeChar = '\\' - -var ( - _ Encoder = &defaultLabelEncoder{} - - // encoderIDCounter is for generating IDs for other label - // encoders. - encoderIDCounter uint64 - - defaultEncoderOnce sync.Once - defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultLabelEncoder -) - -// NewEncoderID returns a unique label encoder ID. It should be -// called once per each type of label encoder. Preferably in init() or -// in var definition. -func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} -} - -// DefaultEncoder returns a label encoder that encodes labels -// in such a way that each escaped label's key is followed by an equal -// sign and then by an escaped label's value. All key-value pairs are -// separated by a comma. -// -// Escaping is done by prepending a backslash before either a -// backslash, equal sign or a comma. -func DefaultEncoder() Encoder { - defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultLabelEncoder{ - pool: sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - }, - } - }) - return defaultEncoderInstance -} - -// Encode is a part of an implementation of the LabelEncoder -// interface. -func (d *defaultLabelEncoder) Encode(iter Iterator) string { - buf := d.pool.Get().(*bytes.Buffer) - defer d.pool.Put(buf) - buf.Reset() - - for iter.Next() { - i, keyValue := iter.IndexedLabel() - if i > 0 { - _, _ = buf.WriteRune(',') - } - copyAndEscape(buf, string(keyValue.Key)) - - _, _ = buf.WriteRune('=') - - if keyValue.Value.Type() == STRING { - copyAndEscape(buf, keyValue.Value.AsString()) - } else { - _, _ = buf.WriteString(keyValue.Value.Emit()) - } - } - return buf.String() -} - -// ID is a part of an implementation of the LabelEncoder interface. -func (*defaultLabelEncoder) ID() EncoderID { - return defaultEncoderID -} - -// copyAndEscape escapes `=`, `,` and its own escape character (`\`), -// making the default encoding unique. -func copyAndEscape(buf *bytes.Buffer, val string) { - for _, ch := range val { - switch ch { - case '=', ',', escapeChar: - buf.WriteRune(escapeChar) - } - buf.WriteRune(ch) - } -} - -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. -func (id EncoderID) Valid() bool { - return id.value != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go deleted file mode 100644 index e03aabb62b..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
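A minimal sketch of how the default label encoding implemented above behaves in practice; the attribute keys and values used here are illustrative assumptions, and the API calls (`attribute.NewSet`, `Set.Encoded`, `attribute.DefaultEncoder`) are the ones defined in this vendored package.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// NewSet sorts keys and de-duplicates with last-value-wins semantics.
	set := attribute.NewSet(
		attribute.String("service", "swarmd"),
		attribute.Int("replicas", 3),
	)

	// DefaultEncoder emits comma-separated key=value pairs, escaping
	// '=', ',' and '\' with a backslash.
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
	// Prints (keys sorted): replicas=3,service=swarmd
}
```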
- -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Iterator allows iterating over the set of labels in order, -// sorted by key. -type Iterator struct { - storage *Set - idx int -} - -// MergeIterator supports iterating over two sets of labels while -// eliminating duplicate values from the combined set. The first -// iterator value takes precedence. -type MergeIterator struct { - one oneIterator - two oneIterator - current KeyValue -} - -type oneIterator struct { - iter Iterator - done bool - label KeyValue -} - -// Next moves the iterator to the next position. Returns false if there -// are no more labels. -func (i *Iterator) Next() bool { - i.idx++ - return i.idx < i.Len() -} - -// Label returns current KeyValue. Must be called only after Next returns -// true. -func (i *Iterator) Label() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv -} - -// Attribute is a synonym for Label(). -func (i *Iterator) Attribute() KeyValue { - return i.Label() -} - -// IndexedLabel returns current index and attribute. Must be called only -// after Next returns true. -func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Label() -} - -// Len returns a number of labels in the iterator's `*Set`. -func (i *Iterator) Len() int { - return i.storage.Len() -} - -// ToSlice is a convenience function that creates a slice of labels -// from the passed iterator. The iterator is set up to start from the -// beginning before creating the slice. -func (i *Iterator) ToSlice() []KeyValue { - l := i.Len() - if l == 0 { - return nil - } - i.idx = -1 - slice := make([]KeyValue, 0, l) - for i.Next() { - slice = append(slice, i.Label()) - } - return slice -} - -// NewMergeIterator returns a MergeIterator for merging two label sets -// Duplicates are resolved by taking the value from the first set. -func NewMergeIterator(s1, s2 *Set) MergeIterator { - mi := MergeIterator{ - one: makeOne(s1.Iter()), - two: makeOne(s2.Iter()), - } - return mi -} - -func makeOne(iter Iterator) oneIterator { - oi := oneIterator{ - iter: iter, - } - oi.advance() - return oi -} - -func (oi *oneIterator) advance() { - if oi.done = !oi.iter.Next(); !oi.done { - oi.label = oi.iter.Label() - } -} - -// Next returns true if there is another label available. -func (m *MergeIterator) Next() bool { - if m.one.done && m.two.done { - return false - } - if m.one.done { - m.current = m.two.label - m.two.advance() - return true - } - if m.two.done { - m.current = m.one.label - m.one.advance() - return true - } - if m.one.label.Key == m.two.label.Key { - m.current = m.one.label // first iterator label value wins - m.one.advance() - m.two.advance() - return true - } - if m.one.label.Key < m.two.label.Key { - m.current = m.one.label - m.one.advance() - return true - } - m.current = m.two.label - m.two.advance() - return true -} - -// Label returns the current value after Next() returns true. -func (m *MergeIterator) Label() KeyValue { - return m.current -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go deleted file mode 100644 index 0656a04e43..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- BoolSlice(name, value). -func (k Key) BoolSlice(v []bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolSliceValue(v), - } -} - -// Int creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int(name, value). -func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// IntSlice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- IntSlice(name, value). -func (k Key) IntSlice(v []int) KeyValue { - return KeyValue{ - Key: k, - Value: IntSliceValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Int64Slice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64Slice(name, value). -func (k Key) Int64Slice(v []int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64SliceValue(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64Slice(v []float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64SliceValue(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- String(name, value). -func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// StringSlice creates a KeyValue instance with a STRINGSLICE Value. 
-// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- StringSlice(name, value). -func (k Key) StringSlice(v []string) KeyValue { - return KeyValue{ - Key: k, - Value: StringSliceValue(v), - } -} - -// Defined returns true for non-empty keys. -func (k Key) Defined() bool { - return len(k) != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go deleted file mode 100644 index 1ddf3ce058..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Valid returns if kv is a valid OpenTelemetry attribute. -func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID -} - -// Bool creates a KeyValue with a BOOL Value type. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// BoolSlice creates a KeyValue with a BOOLSLICE Value type. -func BoolSlice(k string, v []bool) KeyValue { - return Key(k).BoolSlice(v) -} - -// Int creates a KeyValue with an INT64 Value type. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// IntSlice creates a KeyValue with an INT64SLICE Value type. -func IntSlice(k string, v []int) KeyValue { - return Key(k).IntSlice(v) -} - -// Int64 creates a KeyValue with an INT64 Value type. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Int64Slice creates a KeyValue with an INT64SLICE Value type. -func Int64Slice(k string, v []int64) KeyValue { - return Key(k).Int64Slice(v) -} - -// Float64 creates a KeyValue with a FLOAT64 Value type. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. -func Float64Slice(k string, v []float64) KeyValue { - return Key(k).Float64Slice(v) -} - -// String creates a KeyValue with a STRING Value type. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// StringSlice creates a KeyValue with a STRINGSLICE Value type. -func StringSlice(k string, v []string) KeyValue { - return Key(k).StringSlice(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. 
-func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go deleted file mode 100644 index a28f1435cb..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "reflect" - "sort" -) - -type ( - // Set is the representation for a distinct label set. It - // manages an immutable set of labels, with an internal cache - // for storing label encodings. - // - // This type supports the `Equivalent` method of comparison - // using values of type `Distinct`. - // - // This type is used to implement: - // 1. Metric labels - // 2. Resource sets - // 3. Correlation map (TODO) - Set struct { - equivalent Distinct - } - - // Distinct wraps a variable-size array of `KeyValue`, - // constructed with keys in sorted order. This can be used as - // a map key or for equality checking between Sets. - Distinct struct { - iface interface{} - } - - // Filter supports removing certain labels from label sets. - // When the filter returns true, the label will be kept in - // the filtered label set. When the filter returns false, the - // label is excluded from the filtered label set, and the - // label instead appears in the `removed` list of excluded labels. - Filter func(KeyValue) bool - - // Sortable implements `sort.Interface`, used for sorting - // `KeyValue`. This is an exported type to support a - // memory optimization. A pointer to one of these is needed - // for the call to `sort.Stable()`, which the caller may - // provide in order to avoid an allocation. See - // `NewSetWithSortable()`. - Sortable []KeyValue -) - -var ( - // keyValueType is used in `computeDistinctReflect`. - keyValueType = reflect.TypeOf(KeyValue{}) - - // emptySet is returned for empty label sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, - } -) - -// EmptySet returns a reference to a Set with no elements. -// -// This is a convenience provided for optimized calling utility. -func EmptySet() *Set { - return emptySet -} - -// reflect abbreviates `reflect.ValueOf`. -func (d Distinct) reflect() reflect.Value { - return reflect.ValueOf(d.iface) -} - -// Valid returns true if this value refers to a valid `*Set`. -func (d Distinct) Valid() bool { - return d.iface != nil -} - -// Len returns the number of labels in this set. -func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { - return 0 - } - return l.equivalent.reflect().Len() -} - -// Get returns the KeyValue at ordered position `idx` in this set. 
-func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil { - return KeyValue{}, false - } - value := l.equivalent.reflect() - - if idx >= 0 && idx < value.Len() { - // Note: The Go compiler successfully avoids an allocation for - // the interface{} conversion here: - return value.Index(idx).Interface().(KeyValue), true - } - - return KeyValue{}, false -} - -// Value returns the value of a specified key in this set. -func (l *Set) Value(k Key) (Value, bool) { - if l == nil { - return Value{}, false - } - rValue := l.equivalent.reflect() - vlen := rValue.Len() - - idx := sort.Search(vlen, func(idx int) bool { - return rValue.Index(idx).Interface().(KeyValue).Key >= k - }) - if idx >= vlen { - return Value{}, false - } - keyValue := rValue.Index(idx).Interface().(KeyValue) - if k == keyValue.Key { - return keyValue.Value, true - } - return Value{}, false -} - -// HasValue tests whether a key is defined in this set. -func (l *Set) HasValue(k Key) bool { - if l == nil { - return false - } - _, ok := l.Value(k) - return ok -} - -// Iter returns an iterator for visiting the labels in this set. -func (l *Set) Iter() Iterator { - return Iterator{ - storage: l, - idx: -1, - } -} - -// ToSlice returns the set of labels belonging to this set, sorted, -// where keys appear no more than once. -func (l *Set) ToSlice() []KeyValue { - iter := l.Iter() - return iter.ToSlice() -} - -// Equivalent returns a value that may be used as a map key. The -// Distinct type guarantees that the result will equal the equivalent -// Distinct value of any label set with the same elements as this, -// where sets are made unique by choosing the last value in the input -// for any given key. -func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent - } - return l.equivalent -} - -// Equals returns true if the argument set is equivalent to this set. -func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() -} - -// Encoded returns the encoded form of this set, according to -// `encoder`. -func (l *Set) Encoded(encoder Encoder) string { - if l == nil || encoder == nil { - return "" - } - - return encoder.Encode(l.Iter()) -} - -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - -// NewSet returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. -// -// Except for empty sets, this method adds an additional allocation -// compared with calls that include a `*Sortable`. -func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) - return s -} - -// NewSetWithSortable returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. -// -// This call includes a `*Sortable` option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) - return s -} - -// NewSetWithFiltered returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. -// -// This call includes a `Filter` to include/exclude label keys from -// the return value. Excluded keys are returned as a slice of label -// values. -func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - // Check for empty set. 
- if len(kvs) == 0 { - return empty(), nil - } - return NewSetWithSortableFiltered(kvs, new(Sortable), filter) -} - -// NewSetWithSortableFiltered returns a new `Set`. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on `*Set`, although this returns `Set`. -// Callers can avoid memory allocations by: -// -// - allocating a `Sortable` for use as a temporary in this method -// - allocating a `Set` for storing the return value of this -// constructor. -// -// The result maintains a cache of encoded labels, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second `[]KeyValue` return value is a list of labels that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - if filter != nil { - return filterSet(kvs[position:], filter) - } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil -} - -// filterSet reorders `kvs` so that included keys are contiguous at -// the end of the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move labels that do not match the filter so - // they're adjacent before calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. - offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue - } - } - excluded = kvs[:distinctPosition] - - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded -} - -// Filter returns a filtered copy of this `Set`. See the -// documentation for `NewSetWithSortableFiltered` for more details. -func (l *Set) Filter(re Filter) (Set, []KeyValue) { - if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil - } - - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. - return filterSet(l.ToSlice(), re) -} - -// computeDistinct returns a `Distinct` using either the fixed- or -// reflect-oriented code path, depending on the size of the input. -// The input slice is assumed to already be sorted and de-duplicated. 
-func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) - } - return Distinct{ - iface: iface, - } -} - -// computeDistinctFixed computes a `Distinct` for small slices. It -// returns nil if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { - switch len(kvs) { - case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - default: - return nil - } -} - -// computeDistinctReflect computes a `Distinct` using reflection, -// works for any size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { - at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() - for i, keyValue := range kvs { - *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue - } - return at.Interface() -} - -// MarshalJSON returns the JSON encoding of the `*Set`. -func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. -func (l Set) MarshalLog() interface{} { - kvs := make(map[string]string) - for _, kv := range l.ToSlice() { - kvs[string(kv.Key)] = kv.Value.Emit() - } - return kvs -} - -// Len implements `sort.Interface`. -func (l *Sortable) Len() int { - return len(*l) -} - -// Swap implements `sort.Interface`. -func (l *Sortable) Swap(i, j int) { - (*l)[i], (*l)[j] = (*l)[j], (*l)[i] -} - -// Less implements `sort.Interface`. -func (l *Sortable) Less(i, j int) bool { - return (*l)[i].Key < (*l)[j].Key -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go deleted file mode 100644 index e584b24776..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT. - -package attribute - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[INVALID-0] - _ = x[BOOL-1] - _ = x[INT64-2] - _ = x[FLOAT64-3] - _ = x[STRING-4] - _ = x[BOOLSLICE-5] - _ = x[INT64SLICE-6] - _ = x[FLOAT64SLICE-7] - _ = x[STRINGSLICE-8] -} - -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" - -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go deleted file mode 100644 index 6ec5cb290d..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "fmt" - "strconv" - - "go.opentelemetry.io/otel/internal" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int - -// Value represents the value part in key-value pairs. -type Value struct { - vtype Type - numeric uint64 - stringly string - slice interface{} -} - -const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT64 is a 64-bit signed integral Type Value. - INT64 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // BOOLSLICE is a slice of booleans Type Value. - BOOLSLICE - // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. - INT64SLICE - // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. - FLOAT64SLICE - // STRINGSLICE is a slice of strings Type Value. - STRINGSLICE -) - -// BoolValue creates a BOOL Value. -func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: internal.BoolToRaw(v), - } -} - -// BoolSliceValue creates a BOOLSLICE Value. -func BoolSliceValue(v []bool) Value { - cp := make([]bool, len(v)) - copy(cp, v) - return Value{ - vtype: BOOLSLICE, - slice: &cp, - } -} - -// IntValue creates an INT64 Value. -func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// IntSliceValue creates an INTSLICE Value. -func IntSliceValue(v []int) Value { - cp := make([]int64, 0, len(v)) - for _, i := range v { - cp = append(cp, int64(i)) - } - return Value{ - vtype: INT64SLICE, - slice: &cp, - } -} - -// Int64Value creates an INT64 Value. -func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: internal.Int64ToRaw(v), - } -} - -// Int64SliceValue creates an INT64SLICE Value. -func Int64SliceValue(v []int64) Value { - cp := make([]int64, len(v)) - copy(cp, v) - return Value{ - vtype: INT64SLICE, - slice: &cp, - } -} - -// Float64Value creates a FLOAT64 Value. 
-func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), - } -} - -// Float64SliceValue creates a FLOAT64SLICE Value. -func Float64SliceValue(v []float64) Value { - cp := make([]float64, len(v)) - copy(cp, v) - return Value{ - vtype: FLOAT64SLICE, - slice: &cp, - } -} - -// StringValue creates a STRING Value. -func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// StringSliceValue creates a STRINGSLICE Value. -func StringSliceValue(v []string) Value { - cp := make([]string, len(v)) - copy(cp, v) - return Value{ - vtype: STRINGSLICE, - slice: &cp, - } -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) -} - -// AsBoolSlice returns the []bool value. Make sure that the Value's type is -// BOOLSLICE. -func (v Value) AsBoolSlice() []bool { - if s, ok := v.slice.(*[]bool); ok { - return *s - } - return nil -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) -} - -// AsInt64Slice returns the []int64 value. Make sure that the Value's type is -// INT64SLICE. -func (v Value) AsInt64Slice() []int64 { - if s, ok := v.slice.(*[]int64); ok { - return *s - } - return nil -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) -} - -// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is -// FLOAT64SLICE. -func (v Value) AsFloat64Slice() []float64 { - if s, ok := v.slice.(*[]float64); ok { - return *s - } - return nil -} - -// AsString returns the string value. Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsStringSlice returns the []string value. Make sure that the Value's type is -// STRINGSLICE. -func (v Value) AsStringSlice() []string { - if s, ok := v.slice.(*[]string); ok { - return *s - } - return nil -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { - switch v.Type() { - case BOOL: - return v.AsBool() - case BOOLSLICE: - return v.AsBoolSlice() - case INT64: - return v.AsInt64() - case INT64SLICE: - return v.AsInt64Slice() - case FLOAT64: - return v.AsFloat64() - case FLOAT64SLICE: - return v.AsFloat64Slice() - case STRING: - return v.stringly - case STRINGSLICE: - return v.AsStringSlice() - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. -func (v Value) Emit() string { - switch v.Type() { - case BOOLSLICE: - return fmt.Sprint(*(v.slice.(*[]bool))) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT64SLICE: - return fmt.Sprint(*(v.slice.(*[]int64))) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case FLOAT64SLICE: - return fmt.Sprint(*(v.slice.(*[]float64))) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRINGSLICE: - return fmt.Sprint(*(v.slice.(*[]string))) - case STRING: - return v.stringly - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. 
-func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value interface{} - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go deleted file mode 100644 index 824c67b27a..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "errors" - "fmt" - "net/url" - "regexp" - "strings" - - "go.opentelemetry.io/otel/internal/baggage" -) - -const ( - maxMembers = 180 - maxBytesPerMembers = 4096 - maxBytesPerBaggageString = 8192 - - listDelimiter = "," - keyValueDelimiter = "=" - propertyDelimiter = ";" - - keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` - valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` - keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` -) - -var ( - keyRe = regexp.MustCompile(`^` + keyDef + `$`) - valueRe = regexp.MustCompile(`^` + valueDef + `$`) - propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) -) - -var ( - errInvalidKey = errors.New("invalid key") - errInvalidValue = errors.New("invalid value") - errInvalidProperty = errors.New("invalid baggage list-member property") - errInvalidMember = errors.New("invalid baggage list-member") - errMemberNumber = errors.New("too many list-members in baggage-string") - errMemberBytes = errors.New("list-member too large") - errBaggageBytes = errors.New("baggage-string too large") -) - -// Property is an additional metadata entry for a baggage list-member. -type Property struct { - key, value string - - // hasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - hasValue bool - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -func NewKeyProperty(key string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - p := Property{key: key, hasData: true} - return p, nil -} - -func NewKeyValueProperty(key, value string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - - p := Property{ - key: key, - value: value, - hasValue: true, - hasData: true, - } - return p, nil -} - -func newInvalidProperty() Property { - return Property{} -} - -// parseProperty attempts to decode a Property from the passed string. 
It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -func parseProperty(property string) (Property, error) { - if property == "" { - return newInvalidProperty(), nil - } - - match := propertyRe.FindStringSubmatch(property) - if len(match) != 4 { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) - } - - p := Property{hasData: true} - if match[1] != "" { - p.key = match[1] - } else { - p.key = match[2] - p.value = match[3] - p.hasValue = true - } - - return p, nil -} - -// validate ensures p conforms to the W3C Baggage specification, returning an -// error otherwise. -func (p Property) validate() error { - errFunc := func(err error) error { - return fmt.Errorf("invalid property: %w", err) - } - - if !p.hasData { - return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) - } - - if !keyRe.MatchString(p.key) { - return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) - } - if p.hasValue && !valueRe.MatchString(p.value) { - return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) - } - if !p.hasValue && p.value != "" { - return errFunc(errors.New("inconsistent value")) - } - return nil -} - -// Key returns the Property key. -func (p Property) Key() string { - return p.key -} - -// Value returns the Property value. Additionally a boolean value is returned -// indicating if the returned value is the empty if the Property has a value -// that is empty or if the value is not set. -func (p Property) Value() (string, bool) { - return p.value, p.hasValue -} - -// String encodes Property into a string compliant with the W3C Baggage -// specification. -func (p Property) String() string { - if p.hasValue { - return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) - } - return p.key -} - -type properties []Property - -func fromInternalProperties(iProps []baggage.Property) properties { - if len(iProps) == 0 { - return nil - } - - props := make(properties, len(iProps)) - for i, p := range iProps { - props[i] = Property{ - key: p.Key, - value: p.Value, - hasValue: p.HasValue, - } - } - return props -} - -func (p properties) asInternal() []baggage.Property { - if len(p) == 0 { - return nil - } - - iProps := make([]baggage.Property, len(p)) - for i, prop := range p { - iProps[i] = baggage.Property{ - Key: prop.key, - Value: prop.value, - HasValue: prop.hasValue, - } - } - return iProps -} - -func (p properties) Copy() properties { - if len(p) == 0 { - return nil - } - - props := make(properties, len(p)) - copy(props, p) - return props -} - -// validate ensures each Property in p conforms to the W3C Baggage -// specification, returning an error otherwise. -func (p properties) validate() error { - for _, prop := range p { - if err := prop.validate(); err != nil { - return err - } - } - return nil -} - -// String encodes properties into a string compliant with the W3C Baggage -// specification. -func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() - } - return strings.Join(props, propertyDelimiter) -} - -// Member is a list-member of a baggage-string as defined by the W3C Baggage -// specification. -type Member struct { - key, value string - properties properties - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewMember returns a new Member from the passed arguments. 
An error is -// returned if the created Member would be invalid according to the W3C -// Baggage specification. -func NewMember(key, value string, props ...Property) (Member, error) { - m := Member{ - key: key, - value: value, - properties: properties(props).Copy(), - hasData: true, - } - if err := m.validate(); err != nil { - return newInvalidMember(), err - } - - return m, nil -} - -func newInvalidMember() Member { - return Member{} -} - -// parseMember attempts to decode a Member from the passed string. It returns -// an error if the input is invalid according to the W3C Baggage -// specification. -func parseMember(member string) (Member, error) { - if n := len(member); n > maxBytesPerMembers { - return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) - } - - var ( - key, value string - props properties - ) - - parts := strings.SplitN(member, propertyDelimiter, 2) - switch len(parts) { - case 2: - // Parse the member properties. - for _, pStr := range strings.Split(parts[1], propertyDelimiter) { - p, err := parseProperty(pStr) - if err != nil { - return newInvalidMember(), err - } - props = append(props, p) - } - fallthrough - case 1: - // Parse the member key/value pair. - - // Take into account a value can contain equal signs (=). - kv := strings.SplitN(parts[0], keyValueDelimiter, 2) - if len(kv) != 2 { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) - } - // "Leading and trailing whitespaces are allowed but MUST be trimmed - // when converting the header into a data structure." - key = strings.TrimSpace(kv[0]) - var err error - value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", err, value) - } - if !keyRe.MatchString(key) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - default: - // This should never happen unless a developer has changed the string - // splitting somehow. Panic instead of failing silently and allowing - // the bug to slip past the CI checks. - panic("failed to parse baggage member") - } - - return Member{key: key, value: value, properties: props, hasData: true}, nil -} - -// validate ensures m conforms to the W3C Baggage specification, returning an -// error otherwise. -func (m Member) validate() error { - if !m.hasData { - return fmt.Errorf("%w: %q", errInvalidMember, m) - } - - if !keyRe.MatchString(m.key) { - return fmt.Errorf("%w: %q", errInvalidKey, m.key) - } - if !valueRe.MatchString(m.value) { - return fmt.Errorf("%w: %q", errInvalidValue, m.value) - } - return m.properties.validate() -} - -// Key returns the Member key. -func (m Member) Key() string { return m.key } - -// Value returns the Member value. -func (m Member) Value() string { return m.value } - -// Properties returns a copy of the Member properties. -func (m Member) Properties() []Property { return m.properties.Copy() } - -// String encodes Member into a string compliant with the W3C Baggage -// specification. -func (m Member) String() string { - // A key is just an ASCII string, but a value is URL encoded UTF-8. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) - if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) - } - return s -} - -// Baggage is a list of baggage members representing the baggage-string as -// defined by the W3C Baggage specification. 
-type Baggage struct { //nolint:golint - list baggage.List -} - -// New returns a new valid Baggage. It returns an error if it results in a -// Baggage exceeding limits set in that specification. -// -// It expects all the provided members to have already been validated. -func New(members ...Member) (Baggage, error) { - if len(members) == 0 { - return Baggage{}, nil - } - - b := make(baggage.List) - for _, m := range members { - if !m.hasData { - return Baggage{}, errInvalidMember - } - - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // Check member numbers after deduplicating. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - bag := Baggage{b} - if n := len(bag.String()); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - return bag, nil -} - -// Parse attempts to decode a baggage-string from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -// -// If there are duplicate list-members contained in baggage, the last one -// defined (reading left-to-right) will be the only one kept. This diverges -// from the W3C Baggage specification which allows duplicate list-members, but -// conforms to the OpenTelemetry Baggage specification. -func Parse(bStr string) (Baggage, error) { - if bStr == "" { - return Baggage{}, nil - } - - if n := len(bStr); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - b := make(baggage.List) - for _, memberStr := range strings.Split(bStr, listDelimiter) { - m, err := parseMember(memberStr) - if err != nil { - return Baggage{}, err - } - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // OpenTelemetry does not allow for duplicate list-members, but the W3C - // specification does. Now that we have deduplicated, ensure the baggage - // does not exceed list-member limits. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - return Baggage{b}, nil -} - -// Member returns the baggage list-member identified by key. -// -// If there is no list-member matching the passed key the returned Member will -// be a zero-value Member. -// The returned member is not validated, as we assume the validation happened -// when it was added to the Baggage. -func (b Baggage) Member(key string) Member { - v, ok := b.list[key] - if !ok { - // We do not need to worry about distiguising between the situation - // where a zero-valued Member is included in the Baggage because a - // zero-valued Member is invalid according to the W3C Baggage - // specification (it has an empty key). - return newInvalidMember() - } - - return Member{ - key: key, - value: v.Value, - properties: fromInternalProperties(v.Properties), - } -} - -// Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. -// -// The returned members are not validated, as we assume the validation happened -// when they were added to the Baggage. 
-func (b Baggage) Members() []Member { - if len(b.list) == 0 { - return nil - } - - members := make([]Member, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - }) - } - return members -} - -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is -// replaced. -// -// If member is invalid according to the W3C Baggage specification, an error -// is returned with the original Baggage. -func (b Baggage) SetMember(member Member) (Baggage, error) { - if !member.hasData { - return b, errInvalidMember - } - - n := len(b.list) - if _, ok := b.list[member.key]; !ok { - n++ - } - list := make(baggage.List, n) - - for k, v := range b.list { - // Do not copy if we are just going to overwrite. - if k == member.key { - continue - } - list[k] = v - } - - list[member.key] = baggage.Item{ - Value: member.value, - Properties: member.properties.asInternal(), - } - - return Baggage{list: list}, nil -} - -// DeleteMember returns a copy of the Baggage with the list-member identified -// by key removed. -func (b Baggage) DeleteMember(key string) Baggage { - n := len(b.list) - if _, ok := b.list[key]; ok { - n-- - } - list := make(baggage.List, n) - - for k, v := range b.list { - if k == key { - continue - } - list[k] = v - } - - return Baggage{list: list} -} - -// Len returns the number of list-members in the Baggage. -func (b Baggage) Len() int { - return len(b.list) -} - -// String encodes Baggage into a string compliant with the W3C Baggage -// specification. The returned string will be invalid if the Baggage contains -// any invalid list-members. -func (b Baggage) String() string { - members := make([]string, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - }.String()) - } - return strings.Join(members, listDelimiter) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go deleted file mode 100644 index 24b34b7564..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "context" - - "go.opentelemetry.io/otel/internal/baggage" -) - -// ContextWithBaggage returns a copy of parent with baggage. -func ContextWithBaggage(parent context.Context, b Baggage) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, b.list) -} - -// ContextWithoutBaggage returns a copy of parent with no baggage. -func ContextWithoutBaggage(parent context.Context) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. 
- return baggage.ContextWithList(parent, nil) -} - -// FromContext returns the baggage contained in ctx. -func FromContext(ctx context.Context) Baggage { - // Delegate so any hooks for the OpenTracing bridge are handled. - return Baggage{list: baggage.ListFromContext(ctx)} -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go deleted file mode 100644 index 4545100df6..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides functionality for storing and retrieving -baggage items in Go context. For propagating the baggage, see the -go.opentelemetry.io/otel/propagation package. -*/ -package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go deleted file mode 100644 index 064a9279fd..0000000000 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package codes // import "go.opentelemetry.io/otel/codes" - -import ( - "encoding/json" - "fmt" - "strconv" -) - -const ( - // Unset is the default status code. - Unset Code = 0 - // Error indicates the operation contains an error. - Error Code = 1 - // Ok indicates operation has been validated by an Application developers - // or Operator to have completed successfully, or contain no error. - Ok Code = 2 - - maxCode = 3 -) - -// Code is an 32-bit representation of a status state. -type Code uint32 - -var codeToStr = map[Code]string{ - Unset: "Unset", - Error: "Error", - Ok: "Ok", -} - -var strToCode = map[string]Code{ - `"Unset"`: Unset, - `"Error"`: Error, - `"Ok"`: Ok, -} - -// String returns the Code as a string. -func (c Code) String() string { - return codeToStr[c] -} - -// UnmarshalJSON unmarshals b into the Code. -// -// This is based on the functionality in the gRPC codes package: -// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. 
- if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - var x interface{} - if err := json.Unmarshal(b, &x); err != nil { - return err - } - switch x.(type) { - case string: - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - case float64: - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - default: - return fmt.Errorf("invalid code: %q", string(b)) - } -} - -// MarshalJSON returns c as the JSON encoding of c. -func (c *Code) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte("null"), nil - } - str, ok := codeToStr[*c] - if !ok { - return nil, fmt.Errorf("invalid code: %d", *c) - } - return []byte(fmt.Sprintf("%q", str)), nil -} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go deleted file mode 100644 index df3e0f1b62..0000000000 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package codes defines the canonical error codes used by OpenTelemetry. - -It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). -*/ -package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go deleted file mode 100644 index daa36c89dc..0000000000 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package otel provides global access to the OpenTelemetry API. The subpackages of -the otel package provide an implementation of the OpenTelemetry API. - -The provided API is used to instrument code and measure data about that code's -performance and operation. The measured data, by default, is not processed or -transmitted anywhere. An implementation of the OpenTelemetry SDK, like the -default SDK implementation (go.opentelemetry.io/otel/sdk), and associated -exporters are used to process and transport this data. 
- -To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. - -To read more about tracing, see go.opentelemetry.io/otel/trace. - -To read more about metrics, see go.opentelemetry.io/otel/metric. - -To read more about propagation, see go.opentelemetry.io/otel/propagation and -go.opentelemetry.io/otel/baggage. -*/ -package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go deleted file mode 100644 index 72fad85412..0000000000 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// ErrorHandlerFunc is a convenience adapter to allow the use of a function -// as an ErrorHandler. -type ErrorHandlerFunc func(error) - -var _ ErrorHandler = ErrorHandlerFunc(nil) - -// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. -func (f ErrorHandlerFunc) Handle(err error) { - f(err) -} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 9a58fb1d37..0000000000 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -top_dir='.' -if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go deleted file mode 100644 index 35263e01ac..0000000000 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "log" - "os" - "sync" -) - -var ( - // globalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - globalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*delegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*errLogger)(nil) -) - -type delegator struct { - lock *sync.RWMutex - eh ErrorHandler -} - -func (d *delegator) Handle(err error) { - d.lock.RLock() - defer d.lock.RUnlock() - d.eh.Handle(err) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *delegator) setDelegate(eh ErrorHandler) { - d.lock.Lock() - defer d.lock.Unlock() - d.eh = eh -} - -func defaultErrorHandler() *delegator { - return &delegator{ - lock: &sync.RWMutex{}, - eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, - } - -} - -// errLogger logs errors if no delegate is set, otherwise they are delegated. -type errLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *errLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return globalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. 
-func SetErrorHandler(h ErrorHandler) { - globalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err) -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go deleted file mode 100644 index b96e5408e6..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides base types and functionality to store and retrieve -baggage in Go context. This package exists because the OpenTracing bridge to -OpenTelemetry needs to synchronize state whenever baggage for a context is -modified and that context contains an OpenTracing span. If it were not for -this need this package would not need to exist and the -`go.opentelemetry.io/otel/baggage` package would be the singular place where -W3C baggage is handled. -*/ -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -// List is the collection of baggage members. The W3C allows for duplicates, -// but OpenTelemetry does not, therefore, this is represented as a map. -type List map[string]Item - -// Item is the value and metadata properties part of a list-member. -type Item struct { - Value string - Properties []Property -} - -// Property is a metadata entry for a list-member. -type Property struct { - Key, Value string - - // HasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - HasValue bool -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go deleted file mode 100644 index 3c2784eea3..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -import "context" - -type baggageContextKeyType int - -const baggageKey baggageContextKeyType = iota - -// SetHookFunc is a callback called when storing baggage in the context. -type SetHookFunc func(context.Context, List) context.Context - -// GetHookFunc is a callback called when getting baggage from the context. 
-type GetHookFunc func(context.Context, List) List - -type baggageState struct { - list List - - setHook SetHookFunc - getHook GetHookFunc -} - -// ContextWithSetHook returns a copy of parent with hook configured to be -// invoked every time ContextWithBaggage is called. -// -// Passing nil SetHookFunc creates a context with no set hook to call. -func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { - var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: - s = v - } - - s.setHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithGetHook returns a copy of parent with hook configured to be -// invoked every time FromContext is called. -// -// Passing nil GetHookFunc creates a context with no get hook to call. -func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { - var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: - s = v - } - - s.getHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithList returns a copy of parent with baggage. Passing nil list -// returns a context without any baggage. -func ContextWithList(parent context.Context, list List) context.Context { - var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: - s = v - } - - s.list = list - ctx := context.WithValue(parent, baggageKey, s) - if s.setHook != nil { - ctx = s.setHook(ctx, list) - } - - return ctx -} - -// ListFromContext returns the baggage contained in ctx. -func ListFromContext(ctx context.Context) List { - switch v := ctx.Value(baggageKey).(type) { - case baggageState: - if v.getHook != nil { - return v.getHook(ctx, v.list) - } - return v.list - default: - return nil - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go deleted file mode 100644 index 0a378476b0..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "log" - "os" - "sync" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" -) - -// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. -// -// The default logger uses stdr which is backed by the standard `log.Logger` -// interface. This logger will only show messages at the Error Level. -var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -var globalLoggerLock = &sync.RWMutex{} - -// SetLogger overrides the globalLogger with l. 
-// -// To see Info messages use a logger with `l.V(1).Enabled() == true` -// To see Debug messages use a logger with `l.V(5).Enabled() == true` -func SetLogger(l logr.Logger) { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() - globalLogger = l -} - -// Info prints messages about the general state of the API or SDK. -// This should usually be less then 5 messages a minute -func Info(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(1).Info(msg, keysAndValues...) -} - -// Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.Error(err, msg, keysAndValues...) -} - -// Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(5).Info(msg, keysAndValues...) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go deleted file mode 100644 index 06bac35c2f..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/propagation" -) - -// textMapPropagator is a default TextMapPropagator that delegates calls to a -// registered delegate if one is set, otherwise it defaults to delegating the -// calls to a the default no-op propagation.TextMapPropagator. -type textMapPropagator struct { - mtx sync.Mutex - once sync.Once - delegate propagation.TextMapPropagator - noop propagation.TextMapPropagator -} - -// Compile-time guarantee that textMapPropagator implements the -// propagation.TextMapPropagator interface. -var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) - -func newTextMapPropagator() *textMapPropagator { - return &textMapPropagator{ - noop: propagation.NewCompositeTextMapPropagator(), - } -} - -// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are -// forwarded to. Delegation can only be performed once, all subsequent calls -// perform no delegation. -func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { - if delegate == nil { - return - } - - p.mtx.Lock() - p.once.Do(func() { p.delegate = delegate }) - p.mtx.Unlock() -} - -// effectiveDelegate returns the current delegate of p if one is set, -// otherwise the default noop TextMapPropagator is returned. This method -// can be called concurrently. 
-func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { - p.mtx.Lock() - defer p.mtx.Unlock() - if p.delegate != nil { - return p.delegate - } - return p.noop -} - -// Inject set cross-cutting concerns from the Context into the carrier. -func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { - p.effectiveDelegate().Inject(ctx, carrier) -} - -// Extract reads cross-cutting concerns from the carrier into a Context. -func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { - return p.effectiveDelegate().Extract(ctx, carrier) -} - -// Fields returns the keys whose values are set with Inject. -func (p *textMapPropagator) Fields() []string { - return p.effectiveDelegate().Fields() -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go deleted file mode 100644 index d6b3e900cd..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -type ( - tracerProviderHolder struct { - tp trace.TracerProvider - } - - propagatorsHolder struct { - tm propagation.TextMapPropagator - } -) - -var ( - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - - delegateTraceOnce sync.Once - delegateTextMapPropagatorOnce sync.Once -) - -// TracerProvider is the internal implementation for global.TracerProvider. -func TracerProvider() trace.TracerProvider { - return globalTracer.Load().(tracerProviderHolder).tp -} - -// SetTracerProvider is the internal implementation for global.SetTracerProvider. -func SetTracerProvider(tp trace.TracerProvider) { - delegateTraceOnce.Do(func() { - current := TracerProvider() - if current == tp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TracerProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*tracerProvider); ok { - def.setDelegate(tp) - } - - }) - globalTracer.Store(tracerProviderHolder{tp: tp}) -} - -// TextMapPropagator is the internal implementation for global.TextMapPropagator. -func TextMapPropagator() propagation.TextMapPropagator { - return globalPropagators.Load().(propagatorsHolder).tm -} - -// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. -func SetTextMapPropagator(p propagation.TextMapPropagator) { - // For the textMapPropagator already returned by TextMapPropagator - // delegate to p. 
- delegateTextMapPropagatorOnce.Do(func() { - if current := TextMapPropagator(); current == p { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TextMapPropagator, the global instance cannot be reinstalled") - } else if def, ok := current.(*textMapPropagator); ok { - def.SetDelegate(p) - } - }) - // Return p when subsequent calls to TextMapPropagator are made. - globalPropagators.Store(propagatorsHolder{tm: p}) -} - -func defaultTracerValue() *atomic.Value { - v := &atomic.Value{} - v.Store(tracerProviderHolder{tp: &tracerProvider{}}) - return v -} - -func defaultPropagatorsValue() *atomic.Value { - v := &atomic.Value{} - v.Store(propagatorsHolder{tm: newTextMapPropagator()}) - return v -} - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - delegateTraceOnce = sync.Once{} - delegateTextMapPropagatorOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go deleted file mode 100644 index 5f008d0982..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -/* -This file contains the forwarding implementation of the TracerProvider used as -the default global instance. Prior to initialization of an SDK, Tracers -returned by the global TracerProvider will provide no-op functionality. This -means that all Span created prior to initialization are no-op Spans. - -Once an SDK has been initialized, all provided no-op Tracers are swapped for -Tracers provided by the SDK defined TracerProvider. However, any Span started -prior to this initialization does not change its behavior. Meaning, the Span -remains a no-op Span. - -The implementation to track and swap Tracers locks all new Tracer creation -until the swap is complete. This assumes that this operation is not -performance-critical. If that assumption is incorrect, be sure to configure an -SDK prior to any Tracer creation. -*/ - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// tracerProvider is a placeholder for a configured SDK TracerProvider. -// -// All TracerProvider functionality is forwarded to a delegate once -// configured. -type tracerProvider struct { - mtx sync.Mutex - tracers map[il]*tracer - delegate trace.TracerProvider -} - -// Compile-time guarantee that tracerProvider implements the TracerProvider -// interface. -var _ trace.TracerProvider = &tracerProvider{} - -// setDelegate configures p to delegate all TracerProvider functionality to -// provider. 
-// -// All Tracers provided prior to this function call are switched out to be -// Tracers provided by provider. -// -// It is guaranteed by the caller that this happens only once. -func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.tracers) == 0 { - return - } - - for _, t := range p.tracers { - t.setDelegate(provider) - } - - p.tracers = nil -} - -// Tracer implements TracerProvider. -func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Tracer(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. - - c := trace.NewTracerConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - } - - if p.tracers == nil { - p.tracers = make(map[il]*tracer) - } - - if val, ok := p.tracers[key]; ok { - return val - } - - t := &tracer{name: name, opts: opts, provider: p} - p.tracers[key] = t - return t -} - -type il struct { - name string - version string -} - -// tracer is a placeholder for a trace.Tracer. -// -// All Tracer functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopTracer. -type tracer struct { - name string - opts []trace.TracerOption - provider *tracerProvider - - delegate atomic.Value -} - -// Compile-time guarantee that tracer implements the trace.Tracer interface. -var _ trace.Tracer = &tracer{} - -// setDelegate configures t to delegate all Tracer functionality to Tracers -// created by provider. -// -// All subsequent calls to the Tracer methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. -func (t *tracer) setDelegate(provider trace.TracerProvider) { - t.delegate.Store(provider.Tracer(t.name, t.opts...)) -} - -// Start implements trace.Tracer by forwarding the call to t.delegate if -// set, otherwise it forwards the call to a NoopTracer. -func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - delegate := t.delegate.Load() - if delegate != nil { - return delegate.(trace.Tracer).Start(ctx, name, opts...) - } - - s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} - ctx = trace.ContextWithSpan(ctx, s) - return ctx, s -} - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - sc trace.SpanContext - tracer *tracer -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// SetName does nothing. 
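// Editor's note: illustrative sketch only, not part of this patch. It distills the
// forwarding pattern implemented by the deleted state.go/trace.go hunks above: the
// global provider hands out placeholders until an SDK delegate is installed, then
// forwards later calls to that delegate (the real code also swaps delegates into
// objects it already handed out). All names below (lazyProvider, Thing) are invented.
package main

import (
	"fmt"
	"sync"
)

type Thing interface{ Do() string }

type noopThing struct{}

func (noopThing) Do() string { return "noop" }

type realThing struct{}

func (realThing) Do() string { return "real" }

// lazyProvider mirrors tracerProvider: it serves no-ops until setDelegate is
// called, after which it forwards to the installed provider.
type lazyProvider struct {
	mtx      sync.Mutex
	delegate func() Thing
}

func (p *lazyProvider) Get() Thing {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if p.delegate != nil {
		return p.delegate()
	}
	return noopThing{}
}

func (p *lazyProvider) setDelegate(f func() Thing) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	p.delegate = f
}

func main() {
	p := &lazyProvider{}
	fmt.Println(p.Get().Do()) // "noop" before a delegate is installed
	p.setDelegate(func() Thing { return realThing{} })
	fmt.Println(p.Get().Do()) // "real" afterwards
}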
-func (nonRecordingSpan) SetName(string) {} - -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE b/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go deleted file mode 100644 index 77781ead11..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/metric/global" - -import ( - "context" - "sync" - "sync/atomic" - "unsafe" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/internal/metric/registry" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// This file contains the forwarding implementation of MeterProvider used as -// the default global instance. Metric events using instruments provided by -// this implementation are no-ops until the first Meter implementation is set -// as the global provider. -// -// The implementation here uses Mutexes to maintain a list of active Meters in -// the MeterProvider and Instruments in each Meter, under the assumption that -// these interfaces are not performance-critical. -// -// We have the invariant that setDelegate() will be called before a new -// MeterProvider implementation is registered as the global provider. Mutexes -// in the MeterProvider and Meters ensure that each instrument has a delegate -// before the global provider is set. -// -// Metric uniqueness checking is implemented by calling the exported -// methods of the api/metric/registry package. - -type meterKey struct { - InstrumentationName string - InstrumentationVersion string - SchemaURL string -} - -type meterProvider struct { - delegate metric.MeterProvider - - // lock protects `delegate` and `meters`. - lock sync.Mutex - - // meters maintains a unique entry for every named Meter - // that has been registered through the global instance. 
- meters map[meterKey]*meterEntry -} - -type meterImpl struct { - delegate unsafe.Pointer // (*metric.MeterImpl) - - lock sync.Mutex - syncInsts []*syncImpl - asyncInsts []*asyncImpl -} - -type meterEntry struct { - unique sdkapi.MeterImpl - impl meterImpl -} - -type instrument struct { - descriptor sdkapi.Descriptor -} - -type syncImpl struct { - delegate unsafe.Pointer // (*sdkapi.SyncImpl) - - instrument -} - -type asyncImpl struct { - delegate unsafe.Pointer // (*sdkapi.AsyncImpl) - - instrument - - runner sdkapi.AsyncRunner -} - -var _ metric.MeterProvider = &meterProvider{} -var _ sdkapi.MeterImpl = &meterImpl{} -var _ sdkapi.InstrumentImpl = &syncImpl{} -var _ sdkapi.AsyncImpl = &asyncImpl{} - -func (inst *instrument) Descriptor() sdkapi.Descriptor { - return inst.descriptor -} - -// MeterProvider interface and delegation - -func newMeterProvider() *meterProvider { - return &meterProvider{ - meters: map[meterKey]*meterEntry{}, - } -} - -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.lock.Lock() - defer p.lock.Unlock() - - p.delegate = provider - for key, entry := range p.meters { - entry.impl.setDelegate(key, provider) - } - p.meters = nil -} - -func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - p.lock.Lock() - defer p.lock.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(instrumentationName, opts...) - } - - cfg := metric.NewMeterConfig(opts...) - key := meterKey{ - InstrumentationName: instrumentationName, - InstrumentationVersion: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), - } - entry, ok := p.meters[key] - if !ok { - entry = &meterEntry{} - // Note: This code implements its own MeterProvider - // name-uniqueness logic because there is - // synchronization required at the moment of - // delegation. We use the same instrument-uniqueness - // checking the real SDK uses here: - entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) - p.meters[key] = entry - } - return metric.WrapMeterImpl(entry.unique) -} - -// Meter interface and delegation - -func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { - m.lock.Lock() - defer m.lock.Unlock() - - d := new(sdkapi.MeterImpl) - *d = provider.Meter( - key.InstrumentationName, - metric.WithInstrumentationVersion(key.InstrumentationVersion), - metric.WithSchemaURL(key.SchemaURL), - ).MeterImpl() - m.delegate = unsafe.Pointer(d) - - for _, inst := range m.syncInsts { - inst.setDelegate(*d) - } - m.syncInsts = nil - for _, obs := range m.asyncInsts { - obs.setDelegate(*d) - } - m.asyncInsts = nil -} - -func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewSyncInstrument(desc) - } - - inst := &syncImpl{ - instrument: instrument{ - descriptor: desc, - }, - } - m.syncInsts = append(m.syncInsts, inst) - return inst, nil -} - -// Synchronous delegation - -func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.SyncImpl) - - var err error - *implPtr, err = d.NewSyncInstrument(inst.descriptor) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. 
- panic(err) - } - - atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) -} - -func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return inst -} - -// Async delegation - -func (m *meterImpl) NewAsyncInstrument( - desc sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewAsyncInstrument(desc, runner) - } - - inst := &asyncImpl{ - instrument: instrument{ - descriptor: desc, - }, - runner: runner, - } - m.asyncInsts = append(m.asyncInsts, inst) - return inst, nil -} - -func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return obs -} - -func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.AsyncImpl) - - var err error - *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. - panic(err) - } - - atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) -} - -// Metric updates - -func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { - if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { - (*delegatePtr).RecordBatch(ctx, labels, measurements...) - } -} - -func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { - (*instPtr).RecordOne(ctx, number, labels) - } -} - -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), - "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), - "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), - "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go deleted file mode 100644 index 896c6dd1c3..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
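// Editor's note: illustrative sketch only, not part of this patch. The deleted
// meter.go above publishes each instrument's delegate through unsafe.Pointer plus
// atomic.Load/StorePointer so the record path never takes a lock. The same
// publish-once, lock-free-read idea can be expressed with the typed atomic.Pointer
// from Go 1.19+; names here (lazyInstrument, recorder) are invented.
package main

import (
	"fmt"
	"sync/atomic"
)

type recorder interface{ Record(v int64) }

type printRecorder struct{}

func (printRecorder) Record(v int64) { fmt.Println("recorded", v) }

type lazyInstrument struct {
	delegate atomic.Pointer[recorder] // nil until an SDK instrument is installed
}

func (i *lazyInstrument) setDelegate(r recorder) {
	i.delegate.Store(&r)
}

func (i *lazyInstrument) Record(v int64) {
	if d := i.delegate.Load(); d != nil {
		(*d).Record(v) // forward once a delegate exists
		return
	}
	// no-op before delegation, matching the behavior documented above
}

func main() {
	inst := &lazyInstrument{}
	inst.Record(1) // dropped: no delegate yet
	inst.setDelegate(printRecorder{})
	inst.Record(2) // forwarded
}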
- -package global // import "go.opentelemetry.io/otel/internal/metric/global" - -import ( - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/metric" -) - -type meterProviderHolder struct { - mp metric.MeterProvider -} - -var ( - globalMeter = defaultMeterValue() - - delegateMeterOnce sync.Once -) - -// MeterProvider is the internal implementation for global.MeterProvider. -func MeterProvider() metric.MeterProvider { - return globalMeter.Load().(meterProviderHolder).mp -} - -// SetMeterProvider is the internal implementation for global.SetMeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - delegateMeterOnce.Do(func() { - current := MeterProvider() - - if current == mp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid MeterProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*meterProvider); ok { - def.setDelegate(mp) - } - }) - globalMeter.Store(meterProviderHolder{mp: mp}) -} - -func defaultMeterValue() *atomic.Value { - v := &atomic.Value{} - v.Store(meterProviderHolder{mp: newMeterProvider()}) - return v -} - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalMeter = defaultMeterValue() - delegateMeterOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go deleted file mode 100644 index 2f17562f0e..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go deleted file mode 100644 index c929bf45c8..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. -type UniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl sdkapi.MeterImpl - state map[string]sdkapi.InstrumentImpl -} - -var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "a metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl -// with the addition of instrument name uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { - return &UniqueInstrumentMeterImpl{ - impl: impl, - state: map[string]sdkapi.InstrumentImpl{}, - } -} - -// MeterImpl gives the caller access to the underlying MeterImpl -// used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { - return u.impl -} - -// RecordBatch implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { - u.impl.RecordBatch(ctx, labels, ms...) -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. -func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { - return fmt.Errorf("metric %s registered as %s %s: %w", - desc.Name(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two sdkapi.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing sdkapi.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { - impl, ok := u.state[descriptor.Name()] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements sdkapi.MeterImpl. 
-func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = asyncInst - return asyncInst, nil -} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index ce7afaa188..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - return uint64(i) -} - -func RawToInt64(r uint64) int64 { - return int64(r) -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) -} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go deleted file mode 100644 index c4f8acd5d8..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
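// Editor's note: illustrative sketch only, not part of this patch. The deleted
// rawhelpers.go above stores every measurement as a uint64 "raw" word: floats
// round-trip through math.Float64bits, integers through plain conversion. A quick
// stdlib-only check of that round-trip:
package main

import (
	"fmt"
	"math"
)

func main() {
	f := 3.75
	raw := math.Float64bits(f)        // what Float64ToRaw does
	back := math.Float64frombits(raw) // what RawToFloat64 does
	fmt.Println(raw, back == f)       // exact round-trip for float64

	i := int64(-42)
	fmt.Println(int64(uint64(i)) == i) // Int64ToRaw/RawToInt64 are lossless too
}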
- -package otel // import "go.opentelemetry.io/otel" - -import ( - "github.com/go-logr/logr" - - "go.opentelemetry.io/otel/internal/global" -) - -// SetLogger configures the logger used internally to opentelemetry. -func SetLogger(logger logr.Logger) { - global.SetLogger(logger) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go deleted file mode 100644 index 3f722344fa..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "go.opentelemetry.io/otel/metric/unit" -) - -// InstrumentConfig contains options for metric instrument descriptors. -type InstrumentConfig struct { - description string - unit unit.Unit -} - -// Description describes the instrument in human-readable terms. -func (cfg *InstrumentConfig) Description() string { - return cfg.description -} - -// Unit describes the measurement unit for a instrument. -func (cfg *InstrumentConfig) Unit() unit.Unit { - return cfg.unit -} - -// InstrumentOption is an interface for applying metric instrument options. -type InstrumentOption interface { - // ApplyMeter is used to set a InstrumentOption value of a - // InstrumentConfig. - applyInstrument(InstrumentConfig) InstrumentConfig -} - -// NewInstrumentConfig creates a new InstrumentConfig -// and applies all the given options. -func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { - var config InstrumentConfig - for _, o := range opts { - config = o.applyInstrument(config) - } - return config -} - -type instrumentOptionFunc func(InstrumentConfig) InstrumentConfig - -func (fn instrumentOptionFunc) applyInstrument(cfg InstrumentConfig) InstrumentConfig { - return fn(cfg) -} - -// WithDescription applies provided description. 
-func WithDescription(desc string) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.description = desc - return cfg - }) -} - -// WithUnit applies provided unit. -func WithUnit(unit unit.Unit) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.unit = unit - return cfg - }) -} - -// MeterConfig contains options for Meters. -type MeterConfig struct { - instrumentationVersion string - schemaURL string -} - -// InstrumentationVersion is the version of the library providing instrumentation. -func (cfg *MeterConfig) InstrumentationVersion() string { - return cfg.instrumentationVersion -} - -// SchemaURL is the schema_url of the library providing instrumentation. -func (cfg *MeterConfig) SchemaURL() string { - return cfg.schemaURL -} - -// MeterOption is an interface for applying Meter options. -type MeterOption interface { - // ApplyMeter is used to set a MeterOption value of a MeterConfig. - applyMeter(MeterConfig) MeterConfig -} - -// NewMeterConfig creates a new MeterConfig and applies -// all the given options. -func NewMeterConfig(opts ...MeterOption) MeterConfig { - var config MeterConfig - for _, o := range opts { - config = o.applyMeter(config) - } - return config -} - -type meterOptionFunc func(MeterConfig) MeterConfig - -func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { - return fn(cfg) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.instrumentationVersion = version - return config - }) -} - -// WithSchemaURL sets the schema URL. -func WithSchemaURL(schemaURL string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.schemaURL = schemaURL - return config - }) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go deleted file mode 100644 index 4baf0719fc..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package metric provides an implementation of the metrics part of the -OpenTelemetry API. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - -Measurements can be made about an operation being performed or the state of a -system in general. These measurements can be crucial to the reliable operation -of code and provide valuable insights about the inner workings of a system. - -Measurements are made using instruments provided by this package. The type of -instrument used will depend on the type of measurement being made and of what -part of a system is being measured. 
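// Editor's note: illustrative sketch only, not part of this patch. The deleted
// metric/config.go above follows Go's functional-options pattern: each With*
// helper returns a small value that rewrites a config struct, and New*Config folds
// the options together. The same pattern in miniature, with invented names:
package main

import "fmt"

type config struct {
	description string
	unit        string
}

type option interface{ apply(config) config }

type optionFunc func(config) config

func (f optionFunc) apply(c config) config { return f(c) }

func withDescription(d string) option {
	return optionFunc(func(c config) config { c.description = d; return c })
}

func withUnit(u string) option {
	return optionFunc(func(c config) config { c.unit = u; return c })
}

func newConfig(opts ...option) config {
	var c config
	for _, o := range opts {
		c = o.apply(c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", newConfig(withDescription("requests served"), withUnit("1")))
}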
- -Instruments are categorized as Synchronous or Asynchronous and independently -as Adding or Grouping. Synchronous instruments are called by the user with a -Context. Asynchronous instruments are called by the SDK during collection. -Adding instruments are semantically intended for capturing a sum. Grouping -instruments are intended for capturing a distribution. - -Adding instruments may be monotonic, in which case they are non-decreasing -and naturally define a rate. - -The synchronous instrument names are: - - Counter: adding, monotonic - UpDownCounter: adding - Histogram: grouping - -and the asynchronous instruments are: - - CounterObserver: adding, monotonic - UpDownCounterObserver: adding - GaugeObserver: grouping - -All instruments are provided with support for either float64 or int64 input -values. - -An instrument is created using a Meter. Additionally, a Meter is used to -record batches of synchronous measurements or asynchronous observations. A -Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to -the instrumentation it instruments and must be named and versioned when -created with a MeterProvider with the name and version of the instrumentation -library. - -Instrumentation should be designed to accept a MeterProvider from which it can -create its own unique Meter. Alternatively, the registered global -MeterProvider from the go.opentelemetry.io/otel package can be used as a -default. -*/ -package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/global/metric.go deleted file mode 100644 index 14ba862002..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/global/metric.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/metric/global" - -import ( - "go.opentelemetry.io/otel/internal/metric/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter creates an implementation of the Meter interface from the global -// MeterProvider. The instrumentationName must be the name of the library -// providing instrumentation. This name may be the same as the instrumented -// code only if that code provides built-in instrumentation. If the -// instrumentationName is empty, then a implementation defined default name -// will be used instead. -// -// This is short for MeterProvider().Meter(name) -func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(instrumentationName, opts...) -} - -// GetMeterProvider returns the registered global meter provider. If -// none is registered then a default meter provider is returned that -// forwards the Meter interface to the first registered Meter. -// -// Use the meter provider to create a named meter. E.g. 
-// meter := global.MeterProvider().Meter("example.com/foo") -// or -// meter := global.Meter("example.com/foo") -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers `mp` as the global meter provider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric.go b/vendor/go.opentelemetry.io/otel/metric/metric.go deleted file mode 100644 index d8c5a6b3f3..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// MeterProvider supports named Meter instances. -type MeterProvider interface { - // Meter creates an implementation of the Meter interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - Meter(instrumentationName string, opts ...MeterOption) Meter -} - -// Meter is the creator of metric instruments. -// -// An uninitialized Meter is a no-op implementation. -type Meter struct { - impl sdkapi.MeterImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { - return Meter{ - impl: impl, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Measurement = sdkapi.Measurement - -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64GaugeObserver.Observation()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Observation = sdkapi.Observation - -// RecordBatch atomically records a batch of measurements. -func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { - if m.impl == nil { - return - } - m.impl.RecordBatch(ctx, ls, ms...) -} - -// NewBatchObserver creates a new BatchObserver that supports -// making batches of observations for multiple instruments. 
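// Editor's note: illustrative sketch only, not part of this patch. It shows how a
// consumer of this now-removed vendored API would obtain a Meter from the global
// provider and record through it, using the signatures visible in the deleted
// hunks above. The exact Measurement(value) helper on Int64Counter is assumed from
// its mention in the Measurement doc comment, not shown here.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	// Until an SDK calls global.SetMeterProvider, this Meter is backed by the
	// forwarding implementation above and every record is a no-op.
	meter := global.Meter("example.com/foo", metric.WithInstrumentationVersion("v0.1.0"))

	requests, err := meter.NewInt64Counter("requests.total",
		metric.WithDescription("Total requests served"))
	if err != nil {
		log.Fatal(err)
	}

	labels := []attribute.KeyValue{attribute.String("route", "/healthz")}
	meter.RecordBatch(context.Background(), labels, requests.Measurement(1))
}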
-func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { - return BatchObserver{ - meter: m, - runner: newBatchAsyncRunner(callback), - } -} - -// NewInt64Counter creates a new integer Counter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { - return wrapInt64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64Counter creates a new floating point Counter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { - return wrapFloat64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { - return wrapInt64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { - return wrapFloat64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64Histogram creates a new integer Histogram instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) { - return wrapInt64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Int64Kind, opts)) -} - -// NewFloat64Histogram creates a new floating point Histogram with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) { - return wrapFloat64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Float64Kind, opts)) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if callback == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if callback == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { - if callback == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { - if callback == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if callback == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if callback == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if b.runner == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if b.runner == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { - if b.runner == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { - if b.runner == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() sdkapi.MeterImpl { - return m.impl -} - -// newAsync constructs one new asynchronous instrument. -func (m Meter) newAsync( - name string, - mkind sdkapi.InstrumentKind, - nkind number.Kind, - opts []InstrumentOption, - runner sdkapi.AsyncRunner, -) ( - sdkapi.AsyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopAsyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) - return m.impl.NewAsyncInstrument(desc, runner) -} - -// newSync constructs one new synchronous instrument. -func (m Meter) newSync( - name string, - metricKind sdkapi.InstrumentKind, - numberKind number.Kind, - opts []InstrumentOption, -) ( - sdkapi.SyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopSyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) - return m.impl.NewSyncInstrument(desc) -} - -// MeterMust is a wrapper for Meter interfaces that panics when any -// instrument constructor encounters an error. -type MeterMust struct { - meter Meter -} - -// BatchObserverMust is a wrapper for BatchObserver that panics when -// any instrument constructor encounters an error. -type BatchObserverMust struct { - batch BatchObserver -} - -// Must constructs a MeterMust implementation from a Meter, allowing -// the application to panic when any instrument constructor yields an -// error. -func Must(meter Meter) MeterMust { - return MeterMust{meter: meter} -} - -// NewInt64Counter calls `Meter.NewInt64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { - if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the -// instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { - if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { - if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { - if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64Histogram calls `Meter.NewInt64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Histogram(name string, mos ...InstrumentOption) Int64Histogram { - if inst, err := mm.meter.NewInt64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Histogram calls `Meter.NewFloat64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Histogram(name string, mos ...InstrumentOption) Float64Histogram { - if inst, err := mm.meter.NewFloat64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64GaugeObserver calls `Meter.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := mm.meter.NewInt64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `Meter.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := mm.meter.NewFloat64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `Meter.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64CounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := mm.meter.NewInt64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `Meter.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := mm.meter.NewFloat64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `Meter.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := mm.meter.NewInt64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `Meter.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := mm.meter.NewFloat64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewBatchObserver returns a wrapper around BatchObserver that panics -// when any instrument constructor returns an error. -func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { - return BatchObserverMust{ - batch: mm.meter.NewBatchObserver(callback), - } -} - -// NewInt64GaugeObserver calls `BatchObserver.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64GaugeObserver(name string, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := bm.batch.NewInt64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `BatchObserver.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64GaugeObserver(name string, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := bm.batch.NewFloat64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `BatchObserver.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64CounterObserver(name string, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := bm.batch.NewInt64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `BatchObserver.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64CounterObserver(name string, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := bm.batch.NewFloat64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `BatchObserver.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64UpDownCounterObserver(name string, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := bm.batch.NewInt64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `BatchObserver.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := bm.batch.NewFloat64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go deleted file mode 100644 index 2da24c8f21..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. -var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") - -// Int64ObserverFunc is a type of callback that integral -// observers run. -type Int64ObserverFunc func(context.Context, Int64ObserverResult) - -// Float64ObserverFunc is a type of callback that floating point -// observers run. -type Float64ObserverFunc func(context.Context, Float64ObserverResult) - -// BatchObserverFunc is a callback argument for use with any -// Observer instrument that will be reported as a batch of -// observations. -type BatchObserverFunc func(context.Context, BatchObserverResult) - -// Int64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous integer metric instrument. -type Int64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// Float64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous floating point metric instrument. -type Float64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// BatchObserverResult is passed to a batch observer callback to -// capture observations for multiple asynchronous instruments. -type BatchObserverResult struct { - function func([]attribute.KeyValue, ...Observation) -} - -// Observe captures a single integer value from the associated -// instrument callback, with the given labels. -func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { - ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) -} - -// Observe captures a single floating point value from the associated -// instrument callback, with the given labels. -func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { - fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) -} - -// Observe captures a multiple observations from the associated batch -// instrument callback, with the given labels. 
-func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) { - br.function(labels, obs...) -} - -var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) - -// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { - return &c -} - -// AnyRunner implements AsyncRunner. -func (*Int64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*Float64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*BatchObserverFunc) AnyRunner() {} - -// Run implements AsyncSingleRunner. -func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*i)(ctx, Int64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*f)(ctx, Float64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncBatchRunner. -func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) { - (*b)(ctx, BatchObserverResult{ - function: function, - }) -} - -// wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. -func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64GaugeObserver{asyncInstrument: common}, err -} - -// wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. -func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64GaugeObserver{asyncInstrument: common}, err -} - -// wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. -func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64CounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. -func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64CounterObserver{asyncInstrument: common}, err -} - -// wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. -func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64UpDownCounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. 
-func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64UpDownCounterObserver{asyncInstrument: common}, err -} - -// BatchObserver represents an Observer callback that can report -// observations for multiple instruments. -type BatchObserver struct { - meter Meter - runner sdkapi.AsyncBatchRunner -} - -// Int64GaugeObserver is a metric that captures a set of int64 values at a -// point in time. -type Int64GaugeObserver struct { - asyncInstrument -} - -// Float64GaugeObserver is a metric that captures a set of float64 values -// at a point in time. -type Float64GaugeObserver struct { - asyncInstrument -} - -// Int64CounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64CounterObserver struct { - asyncInstrument -} - -// Float64CounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64CounterObserver struct { - asyncInstrument -} - -// Int64UpDownCounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64UpDownCounterObserver struct { - asyncInstrument -} - -// Float64UpDownCounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64UpDownCounterObserver struct { - asyncInstrument -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64GaugeObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64GaugeObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64CounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64CounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64UpDownCounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. 
-func (f Float64UpDownCounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// syncInstrument contains a SyncImpl. -type syncInstrument struct { - instrument sdkapi.SyncImpl -} - -// asyncInstrument contains a AsyncImpl. -type asyncInstrument struct { - instrument sdkapi.AsyncImpl -} - -// AsyncImpl implements AsyncImpl. -func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl { - return a.instrument -} - -// SyncImpl returns the implementation object for synchronous instruments. -func (s syncInstrument) SyncImpl() sdkapi.SyncImpl { - return s.instrument -} - -func (s syncInstrument) float64Measurement(value float64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value)) -} - -func (s syncInstrument) int64Measurement(value int64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value)) -} - -func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - s.instrument.RecordOne(ctx, number, labels) -} - -// checkNewAsync receives an AsyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - instrument = sdkapi.NewNoopAsyncInstrument() - } - return asyncInstrument{ - instrument: instrument, - }, err -} - -// checkNewSync receives an SyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - // Note: an alternate behavior would be to synthesize a new name - // or group all duplicately-named instruments of a certain type - // together and use a tag for the original name, e.g., - // name = 'invalid.counter.int64' - // label = 'original-name=duplicate-counter-name' - instrument = sdkapi.NewNoopSyncInstrument() - } - return syncInstrument{ - instrument: instrument, - }, err -} - -// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Int64Counter{syncInstrument: common}, err -} - -// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Float64Counter{syncInstrument: common}, err -} - -// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. -func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Int64UpDownCounter{syncInstrument: common}, err -} - -// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. -func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Float64UpDownCounter{syncInstrument: common}, err -} - -// wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram. 
-func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Int64Histogram{syncInstrument: common}, err -} - -// wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram. -func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Float64Histogram{syncInstrument: common}, err -} - -// Float64Counter is a metric that accumulates float64 values. -type Float64Counter struct { - syncInstrument -} - -// Int64Counter is a metric that accumulates int64 values. -type Int64Counter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Counter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Counter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64UpDownCounter is a metric instrument that sums floating -// point values. -type Float64UpDownCounter struct { - syncInstrument -} - -// Int64UpDownCounter is a metric instrument that sums integer values. -type Int64UpDownCounter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64UpDownCounter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64UpDownCounter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64Histogram is a metric that records float64 values. -type Float64Histogram struct { - syncInstrument -} - -// Int64Histogram is a metric that records int64 values. -type Int64Histogram struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Histogram) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. 
-func (c Int64Histogram) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Record adds a new value to the list of Histogram's records. The -// labels should contain the keys and values to be associated with -// this value. -func (c Float64Histogram) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Record adds a new value to the Histogram's distribution. The -// labels should contain the keys and values to be associated with -// this value. -func (c Int64Histogram) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go deleted file mode 100644 index 37c653f51a..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/noop.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -type noopMeterProvider struct{} - -// NewNoopMeterProvider returns an implementation of MeterProvider that -// performs no operations. The Meter and Instrument created from the returned -// MeterProvider also perform no operations. -func NewNoopMeterProvider() MeterProvider { - return noopMeterProvider{} -} - -var _ MeterProvider = noopMeterProvider{} - -func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter { - return Meter{} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/vendor/go.opentelemetry.io/otel/metric/number/doc.go deleted file mode 100644 index 0649ff875e..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. 
-*/ -package number // import "go.opentelemetry.io/otel/metric/number" diff --git a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea29..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/number.go b/vendor/go.opentelemetry.io/otel/metric/number/number.go deleted file mode 100644 index 3ec95e2014..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/number.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package number // import "go.opentelemetry.io/otel/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. 
-func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. -func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. -func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. -func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. -func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. 
-func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. -func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. -func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. -func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. -func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. 
-func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. -func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. -func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. -func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. 
This returns: -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. -func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. -func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. -func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. -func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go deleted file mode 100644 index 14eb0532e4..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/unit" -) - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: unit, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. -func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go deleted file mode 100644 index 64aa5ead12..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // HistogramInstrumentKind indicates a Histogram instrument. - HistogramInstrumentKind InstrumentKind = iota - // GaugeObserverInstrumentKind indicates an GaugeObserver instrument. - GaugeObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // CounterObserverInstrumentKind indicates a CounterObserver instrument. 
- CounterObserverInstrumentKind - // UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver - // instrument. - UpDownCounterObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, CounterObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go deleted file mode 100644 index 3a2e79d823..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. - -package sdkapi - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[HistogramInstrumentKind-0] - _ = x[GaugeObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[CounterObserverInstrumentKind-4] - _ = x[UpDownCounterObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go deleted file mode 100644 index f22895dae6..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -type noopInstrument struct { - descriptor Descriptor -} -type noopSyncInstrument struct{ noopInstrument } -type noopAsyncInstrument struct{ noopInstrument } - -var _ SyncImpl = noopSyncInstrument{} -var _ AsyncImpl = noopAsyncInstrument{} - -// NewNoopSyncInstrument returns a No-op implementation of the -// synchronous instrument interface. -func NewNoopSyncInstrument() SyncImpl { - return noopSyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterInstrumentKind, - }, - }, - } -} - -// NewNoopAsyncInstrument returns a No-op implementation of the -// asynchronous instrument interface. -func NewNoopAsyncInstrument() AsyncImpl { - return noopAsyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterObserverInstrumentKind, - }, - }, - } -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (n noopInstrument) Descriptor() Descriptor { - return n.descriptor -} - -func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go deleted file mode 100644 index 36836364bd..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. 
-type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner is a non-exported method with no functional use - // other than to make this a non-empty interface. - AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// NewMeasurement constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - -// Measurement is a low-level type used with synchronous instruments -// as a direct interface to the SDK via `RecordBatch`. -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// NewObservation constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewObservation(instrument AsyncImpl, number number.Number) Observation { - return Observation{ - instrument: instrument, - number: number, - } -} - -// Observation is a low-level type used with asynchronous instruments -// as a direct interface to the SDK via `BatchObserver`. -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - -// AsyncImpl returns the instrument that created this observation. 
-// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/metric/unit/doc.go deleted file mode 100644 index f8e723593e..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package unit provides units. -// -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -package unit // import "go.opentelemetry.io/otel/metric/unit" diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go deleted file mode 100644 index 4615eb16f6..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package unit // import "go.opentelemetry.io/otel/metric/unit" - -type Unit string - -// Units defined by OpenTelemetry. -const ( - Dimensionless Unit = "1" - Bytes Unit = "By" - Milliseconds Unit = "ms" -) diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go deleted file mode 100644 index d29aaa32c0..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/propagation" -) - -// GetTextMapPropagator returns the global TextMapPropagator. If none has been -// set, a No-Op TextMapPropagator is returned. -func GetTextMapPropagator() propagation.TextMapPropagator { - return global.TextMapPropagator() -} - -// SetTextMapPropagator sets propagator as the global TextMapPropagator. -func SetTextMapPropagator(propagator propagation.TextMapPropagator) { - global.SetTextMapPropagator(propagator) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go deleted file mode 100644 index 303cdf1cbf..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - - "go.opentelemetry.io/otel/baggage" -) - -const baggageHeader = "baggage" - -// Baggage is a propagator that supports the W3C Baggage format. -// -// This propagates user-defined baggage associated with a trace. The complete -// specification is defined at https://www.w3.org/TR/baggage/. -type Baggage struct{} - -var _ TextMapPropagator = Baggage{} - -// Inject sets baggage key-values from ctx into the carrier. -func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { - bStr := baggage.FromContext(ctx).String() - if bStr != "" { - carrier.Set(baggageHeader, bStr) - } -} - -// Extract returns a copy of parent with the baggage from the carrier added. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { - bStr := carrier.Get(baggageHeader) - if bStr == "" { - return parent - } - - bag, err := baggage.Parse(bStr) - if err != nil { - return parent - } - return baggage.ContextWithBaggage(parent, bag) -} - -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go deleted file mode 100644 index c119eb2858..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* -Package propagation contains OpenTelemetry context propagators. - -OpenTelemetry propagators are used to extract and inject context data from and -into messages exchanged by applications. The propagator supported by this -package is the W3C Trace Context encoding -(https://www.w3.org/TR/trace-context/), and W3C Baggage -(https://www.w3.org/TR/baggage/). -*/ -package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go deleted file mode 100644 index c94438f73a..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "net/http" -) - -// TextMapCarrier is the storage medium used by a TextMapPropagator. -type TextMapCarrier interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Get returns the value associated with the passed key. - Get(key string) string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Set stores the key-value pair. - Set(key string, value string) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Keys lists the keys stored in this carrier. - Keys() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage -// medium for propagated key-value pairs. -type MapCarrier map[string]string - -// Compile time check that MapCarrier implements the TextMapCarrier. -var _ TextMapCarrier = MapCarrier{} - -// Get returns the value associated with the passed key. -func (c MapCarrier) Get(key string) string { - return c[key] -} - -// Set stores the key-value pair. -func (c MapCarrier) Set(key, value string) { - c[key] = value -} - -// Keys lists the keys stored in this carrier. -func (c MapCarrier) Keys() []string { - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - return keys -} - -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. -type HeaderCarrier http.Header - -// Get returns the value associated with the passed key. -func (hc HeaderCarrier) Get(key string) string { - return http.Header(hc).Get(key) -} - -// Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { - http.Header(hc).Set(key, value) -} - -// Keys lists the keys stored in this carrier. 
-func (hc HeaderCarrier) Keys() []string { - keys := make([]string, 0, len(hc)) - for k := range hc { - keys = append(keys, k) - } - return keys -} - -// TextMapPropagator propagates cross-cutting concerns as key-value text -// pairs within a carrier that travels in-band across process boundaries. -type TextMapPropagator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Inject set cross-cutting concerns from the Context into the carrier. - Inject(ctx context.Context, carrier TextMapCarrier) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Extract reads cross-cutting concerns from the carrier into a Context. - Extract(ctx context.Context, carrier TextMapCarrier) context.Context - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Fields returns the keys whose values are set with Inject. - Fields() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type compositeTextMapPropagator []TextMapPropagator - -func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { - for _, i := range p { - i.Inject(ctx, carrier) - } -} - -func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - for _, i := range p { - ctx = i.Extract(ctx, carrier) - } - return ctx -} - -func (p compositeTextMapPropagator) Fields() []string { - unique := make(map[string]struct{}) - for _, i := range p { - for _, k := range i.Fields() { - unique[k] = struct{}{} - } - } - - fields := make([]string, 0, len(unique)) - for k := range unique { - fields = append(fields, k) - } - return fields -} - -// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the -// group of passed TextMapPropagator. This allows different cross-cutting -// concerns to be propagates in a unified manner. -// -// The returned TextMapPropagator will inject and extract cross-cutting -// concerns in the order the TextMapPropagators were provided. Additionally, -// the Fields method will return a de-duplicated slice of the keys that are -// set with the Inject method. -func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { - return compositeTextMapPropagator(p) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go deleted file mode 100644 index 902692da08..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "encoding/hex" - "fmt" - "regexp" - - "go.opentelemetry.io/otel/trace" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" -) - -// TraceContext is a propagator that supports the W3C Trace Context format -// (https://www.w3.org/TR/trace-context/) -// -// This propagator will propagate the traceparent and tracestate headers to -// guarantee traces are not broken. It is up to the users of this propagator -// to choose if they want to participate in a trace by modifying the -// traceparent header and relevant parts of the tracestate header containing -// their proprietary information. -type TraceContext struct{} - -var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$") - -// Inject set tracecontext from the Context into the carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { - sc := trace.SpanContextFromContext(ctx) - if !sc.IsValid() { - return - } - - if ts := sc.TraceState().String(); ts != "" { - carrier.Set(tracestateHeader, ts) - } - - // Clear all flags other than the trace-context supported sampling bit. - flags := sc.TraceFlags() & trace.FlagsSampled - - h := fmt.Sprintf("%.2x-%s-%s-%s", - supportedVersion, - sc.TraceID(), - sc.SpanID(), - flags) - carrier.Set(traceparentHeader, h) -} - -// Extract reads tracecontext from the carrier into a returned Context. -// -// The returned Context will be a copy of ctx and contain the extracted -// tracecontext as the remote SpanContext. If the extracted tracecontext is -// invalid, the passed ctx will be returned directly instead. -func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - sc := tc.extract(carrier) - if !sc.IsValid() { - return ctx - } - return trace.ContextWithRemoteSpanContext(ctx, sc) -} - -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { - h := carrier.Get(traceparentHeader) - if h == "" { - return trace.SpanContext{} - } - - matches := traceCtxRegExp.FindStringSubmatch(h) - - if len(matches) == 0 { - return trace.SpanContext{} - } - - if len(matches) < 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[1]) != 2 { - return trace.SpanContext{} - } - ver, err := hex.DecodeString(matches[1]) - if err != nil { - return trace.SpanContext{} - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{} - } - - if version == 0 && len(matches) != 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[2]) != 32 { - return trace.SpanContext{} - } - - var scc trace.SpanContextConfig - - scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[3]) != 16 { - return trace.SpanContext{} - } - scc.SpanID, err = trace.SpanIDFromHex(matches[3]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[4]) != 2 { - return trace.SpanContext{} - } - opts, err := hex.DecodeString(matches[4]) - if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { - return trace.SpanContext{} - } - // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled - - // Ignore the error returned here.
Failure to parse tracestate MUST NOT - // affect the parsing of traceparent according to the W3C tracecontext - // specification. - scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) - scc.Remote = true - - sc := trace.NewSpanContext(scc) - if !sc.IsValid() { - return trace.SpanContext{} - } - - return sc -} - -// Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { - return []string{traceparentHeader, tracestateHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go deleted file mode 100644 index ba878d1cf6..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.7.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go deleted file mode 100644 index ea37068627..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go deleted file mode 100644 index 9b430fac0c..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/trace" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, NetTransportUnix) - default: - attrs = append(attrs, NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. 
-func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{EnduserIDKey.String(username)} - } - return nil -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - if request.Method != "" { - attrs = append(attrs, HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) - } - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) -} - -func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) -} - -func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, HTTPSchemeHTTPS) - } else { - attrs = append(attrs, HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, HTTPHostKey.String(request.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, HTTPFlavorKey.String(flavor)) - } - - return attrs -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. 
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPMethodKey.String(request.Method), - HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, HTTPRouteKey.String(route)) - } - if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { - if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { - attrs = append(attrs, HTTPClientIPKey.String(addresses[0])) - } - } - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// Validates the HTTP status code and returns corresponding span status code. -// If the `code` is not a valid HTTP status code, returns span status Error -// and false. 
-func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go deleted file mode 100644 index aab6daf3c5..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go +++ /dev/null @@ -1,946 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. Refer to your provider's docs - // to see the available regions, for example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure regions](https://azure.microsoft.com/en-us/global- - // infrastructure/geographies/), or [Google Cloud - // regions](https://cloud.google.com/about/locations). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") -) - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. -const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// A serverless instance. -const ( - // The name of the single function that this runtime instance executes. 
- // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback function (which - // may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- - // general.md#source-code-attributes) span attributes). - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the single function that this runtime instance executes. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: Depending on the cloud provider, use: - - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // aliases.html) with the resolved function version, as the same runtime instance - // may be invokable with multiple - // different aliases. - // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- - // resource-names) - // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- - // us/rest/api/resources/resources/get-by-id). - - // On some providers, it may not be possible to determine the full ID at startup, - // which is why this field cannot be made required. For example, on AWS the - // account ID - // part of the ARN is not available without calling another AWS API - // which may be deemed too slow for a short-running lambda function. - // As an alternative, consider setting `faas.id` as a span attribute instead. - FaaSIDKey = attribute.Key("faas.id") - // The immutable version of the function being executed. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env- - // var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string, that will be potentially reused for - // other invocations to the same function/function version. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. 
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // The name of the Container in a Pod template. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// An operating system process. -const ( - // Process identifier (PID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. 
For libc-based executables, this would be - // the full argv vector passed to `main`. - // - // Type: string[] - // Required: See below - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// The single (language) runtime instance which is monitored. -const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). 
If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go deleted file mode 100644 index ec8b463d98..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/v1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go deleted file mode 100644 index 9b75bd77ae..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go +++ /dev/null @@ -1,1558 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" - -import "go.opentelemetry.io/otel/attribute" - -// Span attributes used by AWS Lambda (in addition to general `faas` attributes). -const ( - // The full invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` - // applicable). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `faas.id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // If no [tech-specific attribute](#call-level-attributes-for-specific- - // technologies) is defined, this attribute is used to report the name of the - // database being accessed. For commands that switch the database, this should be - // set to the target database (even if the command fails). - // - // Type: string - // Required: Required, if applicable and no more-specific attribute is defined. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". - DBNameKey = attribute.Key("db.name") - // The database statement being executed. - // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The name of the keyspace being accessed. To be used instead of the generic - // `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'mykeyspace' - DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Apache HBase -const ( - // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being - // accessed. To be used instead of the generic `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'default' - DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attrbiutes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#exception-end-example). - - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger on which the function is executed. - // - // Type: Enum - // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. - // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing - // invocations, if it is known to the client. This is, for example, not the case, - // when the transport layer is abstracted in a FaaS client framework without - // access to its configuration. - // Stability: stable - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). 
- // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - // The internet connection type currently being used by the host. 
- // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - // This describes more details regarding the connection.type. It may be the type - // of cell technology connection, but it could be used for describing details - // about a wifi connection. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - // The name of the mobile carrier. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - // The mobile carrier country code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - // The mobile carrier network code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - // The ISO 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// These attributes allow to report this unit of code and therefore to provide more context about the span. -const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. 
Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") -) - -// This document defines semantic conventions for HTTP client and server Spans. -const ( - // HTTP request method. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the attribute's - // value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - // The full request target as passed in a HTTP request line or equivalent. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/path/12314/?q=ddds#123' - HTTPTargetKey = attribute.Key("http.target") - // The value of the [HTTP host - // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header - // should also be reported, see note. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'www.example.org' - // Note: When the header is present but empty the attribute SHOULD be set to the - // empty string. Note that this is a valid situation that is expected in certain - // cases, according the aforementioned [section of RFC - // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not - // set the attribute MUST NOT be set. - HTTPHostKey = attribute.Key("http.host") - // The URI scheme identifying the used protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // Required: If and only if one was received/sent. - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") -) - -var ( - // HTTP 1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP 1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP 2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would - // identify the network-level peer, which may be a proxy. 
- - // This attribute should be set when a source of information different - // from the one used for `net.peer.ip`, is available even if that other - // source just confirms the same value as `net.peer.ip`. - // Rationale: For `net.peer.ip`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.peer.ip` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The the number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` - // request field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") - // The identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are - // present, or only `messaging.kafka.consumer_group`. For brokers, such as - // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the - // message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. 
- // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'grpc', 'java_rmi', 'wcf' - RPCSystemKey = attribute.Key("rpc.system") - // The full (logical) name of the service being called, including its package - // name, if applicable. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - // The name of the (logical) method being called, must be equal to the $method - // part in the span name. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the latter - // (e.g., method actually executing the call on the server side, RPC client stub - // method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
-const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - // `error.message` property of response if it is an error response. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPC received/sent message. -const ( - // Whether this is a received or sent message. - // - // Type: Enum - // Required: No - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - // MUST be calculated as two different counters starting from `1` one for sent - // messages and one for received message. - // - // Type: int - // Required: No - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - // Compressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - // Uncompressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go deleted file mode 100644 index 28b4f5e4d8..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/trace" -) - -// Tracer creates a named tracer that implements Tracer interface. -// If the name is an empty string then provider uses default name. -// -// This is short for GetTracerProvider().Tracer(name, opts...) 
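The semantic-convention file removed above only declares attribute.Key constants and pre-built enum values; nothing in it is behavioural. As a rough, hedged sketch of how those keys and the otel.Tracer shorthand documented here fit together (the tracer name, service and method strings are illustrative, and the literal key strings stand in for the vendored semconv constants whose exact import path is pinned by the old vendor tree):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
    )

    func main() {
        // otel.Tracer is the shorthand documented above for
        // otel.GetTracerProvider().Tracer(name).
        tracer := otel.Tracer("example/instrumentation") // name is illustrative

        _, span := tracer.Start(context.Background(), "rpc.call")
        // Each exported *Key constant is a plain attribute.Key; calling
        // String/Int/etc. on it yields a KeyValue to attach to a span.
        span.SetAttributes(
            attribute.Key("rpc.system").String("grpc"),
            attribute.Key("rpc.service").String("ExampleService"), // illustrative
            attribute.Key("rpc.method").String("ExampleMethod"),   // illustrative
        )
        span.End()
    }
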
-func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - return GetTracerProvider().Tracer(name, opts...) -} - -// GetTracerProvider returns the registered global trace provider. -// If none is registered then an instance of NoopTracerProvider is returned. -// -// Use the trace provider to create a named tracer. E.g. -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") -// or -// tracer := otel.Tracer("example.com/foo") -func GetTracerProvider() trace.TracerProvider { - return global.TracerProvider() -} - -// SetTracerProvider registers `tp` as the global trace provider. -func SetTracerProvider(tp trace.TracerProvider) { - global.SetTracerProvider(tp) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go deleted file mode 100644 index bcc333e04e..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// TracerConfig is a group of options for a Tracer. -type TracerConfig struct { - instrumentationVersion string - // Schema URL of the telemetry emitted by the Tracer. - schemaURL string -} - -// InstrumentationVersion returns the version of the library providing instrumentation. -func (t *TracerConfig) InstrumentationVersion() string { - return t.instrumentationVersion -} - -// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. -func (t *TracerConfig) SchemaURL() string { - return t.schemaURL -} - -// NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) TracerConfig { - var config TracerConfig - for _, option := range options { - config = option.apply(config) - } - return config -} - -// TracerOption applies an option to a TracerConfig. -type TracerOption interface { - apply(TracerConfig) TracerConfig -} - -type tracerOptionFunc func(TracerConfig) TracerConfig - -func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { - return fn(cfg) -} - -// SpanConfig is a group of options for a Span. -type SpanConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - links []Link - newRoot bool - spanKind SpanKind - stackTrace bool -} - -// Attributes describe the associated qualities of a Span. 
-func (cfg *SpanConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in a Span life-cycle. -func (cfg *SpanConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *SpanConfig) StackTrace() bool { - return cfg.stackTrace -} - -// Links are the associations a Span has with other Spans. -func (cfg *SpanConfig) Links() []Link { - return cfg.links -} - -// NewRoot identifies a Span as the root Span for a new trace. This is -// commonly used when an existing trace crosses trust boundaries and the -// remote parent span context should be ignored for security. -func (cfg *SpanConfig) NewRoot() bool { - return cfg.newRoot -} - -// SpanKind is the role a Span has in a trace. -func (cfg *SpanConfig) SpanKind() SpanKind { - return cfg.spanKind -} - -// NewSpanStartConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanStart(c) - } - return c -} - -// NewSpanEndConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanEnd(c) - } - return c -} - -// SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created -type SpanStartOption interface { - applySpanStart(SpanConfig) SpanConfig -} - -type spanOptionFunc func(SpanConfig) SpanConfig - -func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { - return fn(cfg) -} - -// SpanEndOption applies an option to a SpanConfig. These options are -// applicable only when the span is ended. -type SpanEndOption interface { - applySpanEnd(SpanConfig) SpanConfig -} - -// EventConfig is a group of options for an Event. -type EventConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - stackTrace bool -} - -// Attributes describe the associated qualities of an Event. -func (cfg *EventConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in an Event life-cycle. -func (cfg *EventConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *EventConfig) StackTrace() bool { - return cfg.stackTrace -} - -// NewEventConfig applies all the EventOptions to a returned EventConfig. If no -// timestamp option is passed, the returned EventConfig will have a Timestamp -// set to the call time, otherwise no validation is performed on the returned -// EventConfig. -func NewEventConfig(options ...EventOption) EventConfig { - var c EventConfig - for _, option := range options { - c = option.applyEvent(c) - } - if c.timestamp.IsZero() { - c.timestamp = time.Now() - } - return c -} - -// EventOption applies span event options to an EventConfig. -type EventOption interface { - applyEvent(EventConfig) EventConfig -} - -// SpanOption are options that can be used at both the beginning and end of a span. 
-type SpanOption interface { - SpanStartOption - SpanEndOption -} - -// SpanStartEventOption are options that can be used at the start of a span, or with an event. -type SpanStartEventOption interface { - SpanStartOption - EventOption -} - -// SpanEndEventOption are options that can be used at the end of a span, or with an event. -type SpanEndEventOption interface { - SpanEndOption - EventOption -} - -type attributeOption []attribute.KeyValue - -func (o attributeOption) applySpan(c SpanConfig) SpanConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} -func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o attributeOption) applyEvent(c EventConfig) EventConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} - -var _ SpanStartEventOption = attributeOption{} - -// WithAttributes adds the attributes related to a span life-cycle event. -// These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these -// attributes provide additional information about the event being recorded -// (e.g. error, state change, processing progress, system event). -// -// If multiple of these options are passed the attributes of each successive -// option will extend the attributes instead of overwriting. There is no -// guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { - return attributeOption(attributes) -} - -// SpanEventOption are options that can be used with an event or a span. -type SpanEventOption interface { - SpanOption - EventOption -} - -type timestampOption time.Time - -func (o timestampOption) applySpan(c SpanConfig) SpanConfig { - c.timestamp = time.Time(o) - return c -} -func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applyEvent(c EventConfig) EventConfig { - c.timestamp = time.Time(o) - return c -} - -var _ SpanEventOption = timestampOption{} - -// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. -// started, stopped, errored). -func WithTimestamp(t time.Time) SpanEventOption { - return timestampOption(t) -} - -type stackTraceOption bool - -func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } - -// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). -func WithStackTrace(b bool) SpanEndEventOption { - return stackTraceOption(b) -} - -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. Links with invalid span context are ignored. -func WithLinks(links ...Link) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.links = append(cfg.links, links...) - return cfg - }) -} - -// WithNewRoot specifies that the Span should be treated as a root Span. Any -// existing parent span context will be ignored when defining the Span's trace -// identifiers. 
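All of the option types above fold into a SpanConfig. A minimal sketch, assuming nothing beyond the API shown in this file, of how an SDK or a test evaluates start options (the attribute key/value is illustrative):

    package main

    import (
        "fmt"
        "time"

        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // NewSpanStartConfig folds every SpanStartOption into a SpanConfig;
        // tracer implementations do exactly this when Start is called.
        cfg := trace.NewSpanStartConfig(
            trace.WithAttributes(attribute.String("example.key", "value")), // illustrative
            trace.WithTimestamp(time.Unix(0, 0)),
            trace.WithNewRoot(),
        )

        fmt.Println(len(cfg.Attributes()), cfg.Timestamp(), cfg.NewRoot())
    }
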
-func WithNewRoot() SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.newRoot = true - return cfg - }) -} - -// WithSpanKind sets the SpanKind of a Span. -func WithSpanKind(kind SpanKind) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.spanKind = kind - return cfg - }) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.instrumentationVersion = version - return cfg - }) -} - -// WithSchemaURL sets the schema URL for the Tracer. -func WithSchemaURL(schemaURL string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.schemaURL = schemaURL - return cfg - }) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go deleted file mode 100644 index 76f9a083c4..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import "context" - -type traceContextKeyType int - -const currentSpanKey traceContextKeyType = iota - -// ContextWithSpan returns a copy of parent with span set as the current Span. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) -} - -// ContextWithSpanContext returns a copy of parent with sc as the current -// Span. The Span implementation that wraps sc is non-recording and performs -// no operations other than to return sc as the SpanContext from the -// SpanContext method. -func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { - return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) -} - -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly -// as a remote SpanContext and as the current Span. The Span implementation -// that wraps rsc is non-recording and performs no operations other than to -// return rsc as the SpanContext from the SpanContext method. -func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { - return ContextWithSpanContext(parent, rsc.WithRemote(true)) -} - -// SpanFromContext returns the current Span from ctx. -// -// If no Span is currently set in ctx an implementation of a Span that -// performs no operations is returned. -func SpanFromContext(ctx context.Context) Span { - if ctx == nil { - return noopSpan{} - } - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpan{} -} - -// SpanContextFromContext returns the current Span's SpanContext. 
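ContextWithSpan and SpanFromContext are the only mechanism this API offers for carrying the "current" span. A short sketch of the round trip, using only the functions above:

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // With no span stored, SpanFromContext returns a safe no-op span,
        // so callers never need a nil check.
        span := trace.SpanFromContext(context.Background())
        fmt.Println(span.IsRecording()) // false

        // Store and retrieve: downstream code sees the same current span.
        ctx := trace.ContextWithSpan(context.Background(), span)
        fmt.Println(trace.SpanFromContext(ctx).SpanContext().IsValid()) // false: the no-op span has an empty SpanContext
    }
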
-func SpanContextFromContext(ctx context.Context) SpanContext { - return SpanFromContext(ctx).SpanContext() -} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go deleted file mode 100644 index 391417718f..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace provides an implementation of the tracing part of the -OpenTelemetry API. - -To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: - - var tracer trace.Tracer - - func init() { - tracer = otel.Tracer("instrumentation/package/name") - } - - func operation(ctx context.Context) { - var span trace.Span - ctx, span = tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -A Tracer is unique to the instrumentation and is used to create Spans. -Instrumentation should be designed to accept a TracerProvider from which it -can create its own unique Tracer. Alternatively, the registered global -TracerProvider from the go.opentelemetry.io/otel package can be used as -a default. - - const ( - name = "instrumentation/package/name" - version = "0.1.0" - ) - - type Instrumentation struct { - tracer trace.Tracer - } - - func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { - if tp == nil { - tp = otel.TracerProvider() - } - return &Instrumentation{ - tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), - } - } - - func operation(ctx context.Context, inst *Instrumentation) { - var span trace.Span - ctx, span = inst.tracer.Start(ctx, "operation") - defer span.End() - // ... - } -*/ -package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go deleted file mode 100644 index 88fcb81611..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. 
-type nonRecordingSpan struct { - noopSpan - - sc SpanContext -} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go deleted file mode 100644 index ad9a9fc5be..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -// NewNoopTracerProvider returns an implementation of TracerProvider that -// performs no operations. The Tracer and Spans created from the returned -// TracerProvider also perform no operations. -func NewNoopTracerProvider() TracerProvider { - return noopTracerProvider{} -} - -type noopTracerProvider struct{} - -var _ TracerProvider = noopTracerProvider{} - -// Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return noopTracer{} -} - -// noopTracer is an implementation of Tracer that preforms no operations. -type noopTracer struct{} - -var _ Tracer = noopTracer{} - -// Start carries forward a non-recording Span, if one is present in the context, otherwise it -// creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { - span := SpanFromContext(ctx) - if _, ok := span.(nonRecordingSpan); !ok { - // span is likely already a noopSpan, but let's be sure - span = noopSpan{} - } - return ContextWithSpan(ctx, span), span -} - -// noopSpan is an implementation of Span that preforms no operations. -type noopSpan struct{} - -var _ Span = noopSpan{} - -// SpanContext returns an empty span context. -func (noopSpan) SpanContext() SpanContext { return SpanContext{} } - -// IsRecording always returns false. -func (noopSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (noopSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (noopSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (noopSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (noopSpan) End(...SpanEndOption) {} - -// RecordError does nothing. -func (noopSpan) RecordError(error, ...EventOption) {} - -// AddEvent does nothing. -func (noopSpan) AddEvent(string, ...EventOption) {} - -// SetName does nothing. 
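The no-op implementations above are what make the API safe to call before (or without) an SDK being configured. A small sketch of that behaviour:

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        tp := trace.NewNoopTracerProvider()
        _, span := tp.Tracer("example").Start(context.Background(), "operation")

        // Every method is a no-op; nothing is recorded and nothing is exported.
        span.SetAttributes(attribute.Bool("noop", true))
        span.AddEvent("ignored")
        span.End()

        fmt.Println(span.IsRecording(), span.SpanContext().IsValid()) // false false
    }
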
-func (noopSpan) SetName(string) {} - -// TracerProvider returns a no-op TracerProvider -func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go deleted file mode 100644 index 0923ceb98d..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -const ( - // FlagsSampled is a bitmask with the sampled bit set. A SpanContext - // with the sampling bit set means the span is sampled. - FlagsSampled = TraceFlags(0x01) - - errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" - - errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" - errNilTraceID errorConst = "trace-id can't be all zero" - - errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" - errNilSpanID errorConst = "span-id can't be all zero" -) - -type errorConst string - -func (e errorConst) Error() string { - return string(e) -} - -// TraceID is a unique identity of a trace. -// nolint:revive // revive complains about stutter of `trace.TraceID`. -type TraceID [16]byte - -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID - -// IsValid checks whether the trace TraceID is valid. A valid trace ID does -// not consist of zeros only. -func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) -} - -// MarshalJSON implements a custom marshal function to encode TraceID -// as a hex string. -func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String returns the hex string representation form of a TraceID -func (t TraceID) String() string { - return hex.EncodeToString(t[:]) -} - -// SpanID is a unique identity of a span in a trace. -type SpanID [8]byte - -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID - -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist -// of zeros only. -func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) -} - -// MarshalJSON implements a custom marshal function to encode SpanID -// as a hex string. -func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// String returns the hex string representation form of a SpanID -func (s SpanID) String() string { - return hex.EncodeToString(s[:]) -} - -// TraceIDFromHex returns a TraceID from a hex string if it is compliant with -// the W3C trace-context specification. See more at -// https://www.w3.org/TR/trace-context/#trace-id -// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. 
-func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} - if len(h) != 32 { - return t, errInvalidTraceIDLength - } - - if err := decodeHex(h, t[:]); err != nil { - return t, err - } - - if !t.IsValid() { - return t, errNilTraceID - } - return t, nil -} - -// SpanIDFromHex returns a SpanID from a hex string if it is compliant -// with the w3c trace-context specification. -// See more at https://www.w3.org/TR/trace-context/#parent-id -func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} - if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err - } - - if !s.IsValid() { - return s, errNilSpanID - } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } - } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err - } - - copy(b, decoded) - return nil -} - -// TraceFlags contains flags that can be set on a SpanContext -type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. - -// IsSampled returns if the sampling bit is set in the TraceFlags. -func (tf TraceFlags) IsSampled() bool { - return tf&FlagsSampled == FlagsSampled -} - -// WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { - if sampled { - return tf | FlagsSampled - } - - return tf &^ FlagsSampled -} - -// MarshalJSON implements a custom marshal function to encode TraceFlags -// as a hex string. -func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) -} - -// String returns the hex string representation form of TraceFlags -func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) -} - -// SpanContextConfig contains mutable fields usable for constructing -// an immutable SpanContext. -type SpanContextConfig struct { - TraceID TraceID - SpanID SpanID - TraceFlags TraceFlags - TraceState TraceState - Remote bool -} - -// NewSpanContext constructs a SpanContext using values from the provided -// SpanContextConfig. -func NewSpanContext(config SpanContextConfig) SpanContext { - return SpanContext{ - traceID: config.TraceID, - spanID: config.SpanID, - traceFlags: config.TraceFlags, - traceState: config.TraceState, - remote: config.Remote, - } -} - -// SpanContext contains identifying trace information about a Span. -type SpanContext struct { - traceID TraceID - spanID SpanID - traceFlags TraceFlags - traceState TraceState - remote bool -} - -var _ json.Marshaler = SpanContext{} - -// IsValid returns if the SpanContext is valid. A valid span context has a -// valid TraceID and SpanID. -func (sc SpanContext) IsValid() bool { - return sc.HasTraceID() && sc.HasSpanID() -} - -// IsRemote indicates whether the SpanContext represents a remotely-created Span. -func (sc SpanContext) IsRemote() bool { - return sc.remote -} - -// WithRemote returns a copy of sc with the Remote property set to remote. -func (sc SpanContext) WithRemote(remote bool) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: remote, - } -} - -// TraceID returns the TraceID from the SpanContext. -func (sc SpanContext) TraceID() TraceID { - return sc.traceID -} - -// HasTraceID checks if the SpanContext has a valid TraceID. 
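TraceIDFromHex, SpanIDFromHex and SpanContextConfig are how a propagator turns the fields of a W3C traceparent header back into a SpanContext. A sketch of that path (the IDs are the example values from the W3C trace-context specification, not anything from this repository):

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // Example IDs from the W3C trace-context spec.
        tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
        if err != nil {
            panic(err)
        }
        sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
        if err != nil {
            panic(err)
        }

        sc := trace.NewSpanContext(trace.SpanContextConfig{
            TraceID:    tid,
            SpanID:     sid,
            TraceFlags: trace.FlagsSampled,
        })

        // Mark the parent as remote and make it current, which is what a
        // propagator's extract step does.
        ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)
        got := trace.SpanContextFromContext(ctx)
        fmt.Println(got.IsValid(), got.IsSampled(), got.IsRemote()) // true true true
    }
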
-func (sc SpanContext) HasTraceID() bool { - return sc.traceID.IsValid() -} - -// WithTraceID returns a new SpanContext with the TraceID replaced. -func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { - return SpanContext{ - traceID: traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// SpanID returns the SpanID from the SpanContext. -func (sc SpanContext) SpanID() SpanID { - return sc.spanID -} - -// HasSpanID checks if the SpanContext has a valid SpanID. -func (sc SpanContext) HasSpanID() bool { - return sc.spanID.IsValid() -} - -// WithSpanID returns a new SpanContext with the SpanID replaced. -func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceFlags returns the flags from the SpanContext. -func (sc SpanContext) TraceFlags() TraceFlags { - return sc.traceFlags -} - -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. -func (sc SpanContext) IsSampled() bool { - return sc.traceFlags.IsSampled() -} - -// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. -func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: flags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceState returns the TraceState from the SpanContext. -func (sc SpanContext) TraceState() TraceState { - return sc.traceState -} - -// WithTraceState returns a new SpanContext with the TraceState replaced. -func (sc SpanContext) WithTraceState(state TraceState) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: state, - remote: sc.remote, - } -} - -// Equal is a predicate that determines whether two SpanContext values are equal. -func (sc SpanContext) Equal(other SpanContext) bool { - return sc.traceID == other.traceID && - sc.spanID == other.spanID && - sc.traceFlags == other.traceFlags && - sc.traceState.String() == other.traceState.String() && - sc.remote == other.remote -} - -// MarshalJSON implements a custom marshal function to encode a SpanContext. -func (sc SpanContext) MarshalJSON() ([]byte, error) { - return json.Marshal(SpanContextConfig{ - TraceID: sc.traceID, - SpanID: sc.spanID, - TraceFlags: sc.traceFlags, - TraceState: sc.traceState, - Remote: sc.remote, - }) -} - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: methods may be added to this interface in minor releases. -type Span interface { - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. 
- IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, overriding previous values set. The description is only - // included in a status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. 
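A hedged sketch of the Span methods described above, in particular that RecordError does not change the span status. The doWork helper, the attribute key, and the error are made up for illustration; SpanFromContext and the codes/attribute packages come from elsewhere in the otel module, not from this hunk:

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// doWork reports its outcome on whatever span is carried in ctx. With no
// span in ctx, SpanFromContext yields a no-op span, so the calls are safe.
func doWork(ctx context.Context, work func(context.Context) error) error {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(attribute.String("worker.name", "example")) // overwrites an existing value for the key

	if err := work(ctx); err != nil {
		// RecordError only adds an exception event; the status has to be
		// set separately for the span to be reported as failed.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	span.AddEvent("work finished")
	return nil
}

func main() {
	_ = doWork(context.Background(), func(context.Context) error {
		return errors.New("boom")
	})
}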
- SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: methods may be added to this interface in minor releases. -type Tracer interface { - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides access to instrumentation Tracers. -// -// Warning: methods may be added to this interface in minor releases. -type TracerProvider interface { - // Tracer creates an implementation of the Tracer interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - // - // This method must be concurrency safe. 
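A minimal sketch of the Tracer/TracerProvider contract documented above. The handle function, the instrumentation name, and the use of the package's no-op provider are assumptions made so the snippet stands on its own:

package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// handle shows the Tracer contract: every Start is paired with an End, and
// the context returned by Start carries the new span, so spans started from
// it become children rather than new roots.
func handle(ctx context.Context, tp trace.TracerProvider) {
	tracer := tp.Tracer("github.com/example/instrumented-lib")

	ctx, span := tracer.Start(ctx, "handle")
	defer span.End()

	_, child := tracer.Start(ctx, "handle.step") // child of "handle"
	child.End()
}

func main() {
	// A no-op provider satisfies the interface without exporting anything.
	handle(context.Background(), trace.NewNoopTracerProvider())
}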
- Tracer(instrumentationName string, opts ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go deleted file mode 100644 index 86fc62c1d8..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" -) - -var ( - maxListMembers = 32 - - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) - - errInvalidKey errorConst = "invalid tracestate key" - errInvalidValue errorConst = "invalid tracestate value" - errInvalidMember errorConst = "invalid tracestate list-member" - errMemberNumber errorConst = "too many list-members in tracestate" - errDuplicate errorConst = "duplicate list-member in tracestate" -) - -type member struct { - Key string - Value string -} - -func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) - } - return member{Key: key, Value: value}, nil -} - -func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil - -} - -// String encodes member into a string compliant with the W3C Trace Context -// specification. -func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) -} - -// TraceState provides additional vendor-specific trace identification -// information across different distributed tracing systems. It represents an -// immutable list consisting of key/value pairs, each pair is referred to as a -// list-member. -// -// TraceState conforms to the W3C Trace Context specification -// (https://www.w3.org/TR/trace-context-1). All operations that create or copy -// a TraceState do so by validating all input and will only produce TraceState -// that conform to the specification. Specifically, this means that all -// list-member's key/value pairs are valid, no duplicate list-members exist, -// and the maximum number of list-members (32) is not exceeded. 
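A small sketch of parsing a tracestate header with the function removed below, using the W3C example header value (the key/value pairs are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Two vendors; keys and values are validated against the member
	// regular expressions above, and duplicates or >32 members fail.
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Len())        // 2
	fmt.Println(ts.Get("congo")) // t61rcWkgMzE
	fmt.Println(ts.String())     // rojo=00f067aa0ba902b7,congo=t61rcWkgMzE
}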
-type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` - // list is the members in order. - list []member -} - -var _ json.Marshaler = TraceState{} - -// ParseTraceState attempts to decode a TraceState from the passed -// string. It returns an error if the input is invalid according to the W3C -// Trace Context specification. -func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { - return TraceState{}, nil - } - - wrapErr := func(err error) error { - return fmt.Errorf("failed to parse tracestate: %w", err) - } - - var members []member - found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { - if len(memberStr) == 0 { - continue - } - - m, err := parseMember(memberStr) - if err != nil { - return TraceState{}, wrapErr(err) - } - - if _, ok := found[m.Key]; ok { - return TraceState{}, wrapErr(errDuplicate) - } - found[m.Key] = struct{}{} - - members = append(members, m) - if n := len(members); n > maxListMembers { - return TraceState{}, wrapErr(errMemberNumber) - } - } - - return TraceState{list: members}, nil -} - -// MarshalJSON marshals the TraceState into JSON. -func (ts TraceState) MarshalJSON() ([]byte, error) { - return json.Marshal(ts.String()) -} - -// String encodes the TraceState into a string compliant with the W3C -// Trace Context specification. The returned string will be invalid if the -// TraceState contains any invalid members. -func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() - } - return strings.Join(members, listDelimiter) -} - -// Get returns the value paired with key from the corresponding TraceState -// list-member if it exists, otherwise an empty string is returned. -func (ts TraceState) Get(key string) string { - for _, member := range ts.list { - if member.Key == key { - return member.Value - } - } - - return "" -} - -// Insert adds a new list-member defined by the key/value pair to the -// TraceState. If a list-member already exists for the given key, that -// list-member's value is updated. The new or updated list-member is always -// moved to the beginning of the TraceState as specified by the W3C Trace -// Context specification. -// -// If key or value are invalid according to the W3C Trace Context -// specification an error is returned with the original TraceState. -// -// If adding a new list-member means the TraceState would have more members -// than is allowed an error is returned instead with the original TraceState. -func (ts TraceState) Insert(key, value string) (TraceState, error) { - m, err := newMember(key, value) - if err != nil { - return ts, err - } - - cTS := ts.Delete(key) - if cTS.Len()+1 > maxListMembers { - // TODO (MrAlias): When the second version of the Trace Context - // specification is published this needs to not return an error. - // Instead it should drop the "right-most" member and insert the new - // member at the front. - // - // https://github.com/w3c/trace-context/pull/448 - return ts, fmt.Errorf("failed to insert: %w", errMemberNumber) - } - - cTS.list = append(cTS.list, member{}) - copy(cTS.list[1:], cTS.list) - cTS.list[0] = m - - return cTS, nil -} - -// Delete returns a copy of the TraceState with the list-member identified by -// key removed. 
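A sketch of the Get/Insert/Delete semantics documented above, showing that updates return copies and that an inserted member moves to the front (keys and values are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("rojo=a,congo=b")
	if err != nil {
		panic(err)
	}

	// Insert returns a new TraceState with the updated member moved to the
	// front, as the specification requires; ts itself is left untouched.
	ts2, err := ts.Insert("congo", "c")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.String())  // rojo=a,congo=b
	fmt.Println(ts2.String()) // congo=c,rojo=a

	// Delete likewise returns a copy with the member removed.
	fmt.Println(ts2.Delete("rojo").String()) // congo=c
	fmt.Println(ts2.Get("rojo"))             // a
}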
-func (ts TraceState) Delete(key string) TraceState { - members := make([]member, ts.Len()) - copy(members, ts.list) - for i, member := range ts.list { - if member.Key == key { - members = append(members[:i], members[i+1:]...) - // TraceState should contain no duplicate members. - break - } - } - return TraceState{list: members} -} - -// Len returns the number of list-members in the TraceState. -func (ts TraceState) Len() int { - return len(ts.list) -} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index dbb61a4227..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) 
-done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go deleted file mode 100644 index a09bcbb5e8..0000000000 --- a/vendor/go.opentelemetry.io/otel/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// Version is the current release version of OpenTelemetry in use. -func Version() string { - return "1.4.1" -} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml deleted file mode 100644 index 3f06f29934..0000000000 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -module-sets: - stable-v1: - version: v1.4.1 - modules: - - go.opentelemetry.io/otel - - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/example/fib - - go.opentelemetry.io/otel/example/jaeger - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - - go.opentelemetry.io/otel/exporters/jaeger - - go.opentelemetry.io/otel/exporters/zipkin - - go.opentelemetry.io/otel/exporters/otlp/otlptrace - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - - go.opentelemetry.io/otel/exporters/otlp/internal/retry - - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - - go.opentelemetry.io/otel/trace - - go.opentelemetry.io/otel/sdk - experimental-metrics: - version: v0.27.0 - modules: - - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - - go.opentelemetry.io/otel/exporters/prometheus - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/internal/metric - - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk/export/metric - - go.opentelemetry.io/otel/sdk/metric - experimental-schema: - version: v0.0.2 - modules: - - go.opentelemetry.io/otel/schema - bridge: - version: v0.27.1 - modules: - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus -excluded-modules: - - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/modules.txt b/vendor/modules.txt index 2e4b31e160..7d23ad92f5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,28 +52,7 @@ github.com/davecgh/go-spew/spew github.com/distribution/reference # github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible ## explicit -github.com/docker/docker/api -github.com/docker/docker/api/types -github.com/docker/docker/api/types/blkiodev -github.com/docker/docker/api/types/checkpoint -github.com/docker/docker/api/types/container -github.com/docker/docker/api/types/events -github.com/docker/docker/api/types/filters -github.com/docker/docker/api/types/image -github.com/docker/docker/api/types/mount -github.com/docker/docker/api/types/network -github.com/docker/docker/api/types/registry -github.com/docker/docker/api/types/strslice -github.com/docker/docker/api/types/swarm -github.com/docker/docker/api/types/swarm/runtime -github.com/docker/docker/api/types/system -github.com/docker/docker/api/types/time -github.com/docker/docker/api/types/versions -github.com/docker/docker/api/types/volume -github.com/docker/docker/client github.com/docker/docker/errdefs -github.com/docker/docker/image/spec/specs-go/v1 -github.com/docker/docker/internal/multierror github.com/docker/docker/libnetwork/bitmap github.com/docker/docker/libnetwork/discoverapi github.com/docker/docker/libnetwork/driverapi @@ -106,7 +85,6 @@ github.com/docker/docker/pkg/plugins/transport github.com/docker/docker/pkg/rootless # github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea ## explicit; go 1.18 -github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-events 
v0.0.0-20190806004212-e31b211e4f1c @@ -115,15 +93,9 @@ github.com/docker/go-events # github.com/docker/go-metrics v0.0.1 ## explicit; go 1.11 github.com/docker/go-metrics -# github.com/docker/go-units v0.5.0 -## explicit -github.com/docker/go-units # github.com/dustin/go-humanize v1.0.0 ## explicit github.com/dustin/go-humanize -# github.com/felixge/httpsnoop v1.0.3 -## explicit; go 1.13 -github.com/felixge/httpsnoop # github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee ## explicit github.com/fernet/fernet-go @@ -133,10 +105,6 @@ github.com/fsnotify/fsnotify # github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr -github.com/go-logr/logr/funcr -# github.com/go-logr/stdr v1.2.2 -## explicit; go 1.16 -github.com/go-logr/stdr # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/gogoproto @@ -218,10 +186,6 @@ github.com/jmoiron/sqlx/types # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/moby/term v0.5.0 -## explicit; go 1.18 -# github.com/morikuni/aec v1.0.0 -## explicit # github.com/nxadm/tail v1.4.8 ## explicit; go 1.13 github.com/nxadm/tail @@ -267,10 +231,6 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc5 -## explicit; go 1.18 -github.com/opencontainers/image-spec/specs-go -github.com/opencontainers/image-spec/specs-go/v1 # github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee ## explicit github.com/phayes/permbits @@ -375,34 +335,6 @@ go.etcd.io/etcd/server/v3/etcdserver/api/snap go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb go.etcd.io/etcd/server/v3/wal go.etcd.io/etcd/server/v3/wal/walpb -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 -## explicit; go 1.16 -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -# go.opentelemetry.io/otel v1.4.1 -## explicit; go 1.16 -go.opentelemetry.io/otel -go.opentelemetry.io/otel/attribute -go.opentelemetry.io/otel/baggage -go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/baggage -go.opentelemetry.io/otel/internal/global -go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/v1.7.0 -# go.opentelemetry.io/otel/internal/metric v0.27.0 -## explicit; go 1.16 -go.opentelemetry.io/otel/internal/metric/global -go.opentelemetry.io/otel/internal/metric/registry -# go.opentelemetry.io/otel/metric v0.27.0 -## explicit; go 1.16 -go.opentelemetry.io/otel/metric -go.opentelemetry.io/otel/metric/global -go.opentelemetry.io/otel/metric/number -go.opentelemetry.io/otel/metric/sdkapi -go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/trace v1.4.1 -## explicit; go 1.16 -go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.9.0 ## explicit; go 1.13 go.uber.org/atomic