diff --git a/api/docs/apis-image.openshift.io/v1.ImageStream.adoc b/api/docs/apis-image.openshift.io/v1.ImageStream.adoc index f39322c54498..2130095dbffc 100644 --- a/api/docs/apis-image.openshift.io/v1.ImageStream.adoc +++ b/api/docs/apis-image.openshift.io/v1.ImageStream.adoc @@ -878,6 +878,60 @@ The server guarantees that the objects returned when using continue will be iden * application/vnd.kubernetes.protobuf +[[Get-apis-image.openshift.io-v1-namespaces-namespace-imagestreams-name-layers]] +=== Get layers of a ImageStream in a namespace +Read layers of the specified ImageStream + +==== HTTP request +---- +GET /apis/image.openshift.io/v1/namespaces/$NAMESPACE/imagestreams/$NAME/layers HTTP/1.1 +Authorization: Bearer $TOKEN +Accept: application/json +Connection: close +---- + +==== Curl request +---- +$ curl -k \ + -H "Authorization: Bearer $TOKEN" \ + -H 'Accept: application/json' \ + https://$ENDPOINT/apis/image.openshift.io/v1/namespaces/$NAMESPACE/imagestreams/$NAME/layers +---- + +==== Path parameters +[cols="1,5", options="header"] +|=== +|Parameter|Description +|name|name of the ImageStreamLayers +|namespace|object name and auth scope, such as for teams and projects +|=== + +==== Query parameters +[cols="1,5", options="header"] +|=== +|Parameter|Description +|pretty|If 'true', then the output is pretty printed. 
+|=== + +==== Responses +[cols="1,5", options="header"] +|=== +|HTTP Code|Schema +|200 OK|v1.ImageStreamLayers +|401 Unauthorized| +|=== + +==== Consumes + +* \*/* + +==== Produces + +* application/json +* application/yaml +* application/vnd.kubernetes.protobuf + + [[Get-apis-image.openshift.io-v1-namespaces-namespace-imagestreams-name-secrets]] === Get secrets of a ImageStream in a namespace Read secrets of the specified ImageStream diff --git a/api/protobuf-spec/github_com_openshift_api_image_v1.proto b/api/protobuf-spec/github_com_openshift_api_image_v1.proto index bbbfa5df7d40..417a6c1bb29d 100644 --- a/api/protobuf-spec/github_com_openshift_api_image_v1.proto +++ b/api/protobuf-spec/github_com_openshift_api_image_v1.proto @@ -68,6 +68,20 @@ message Image { optional string dockerImageConfig = 10; } +// ImageBlobReferences describes the blob references within an image. +message ImageBlobReferences { + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + repeated string layers = 1; + + // manifest, if set, is the blob that contains the image manifest. Some images do + // not have separate manifest blobs and this field will be set to nil if so. + // +optional + optional string manifest = 2; +} + // ImageImportSpec describes a request to import a specific image. message ImageImportSpec { // From is the source of an image to import; only kind DockerImage is allowed @@ -110,6 +124,16 @@ message ImageLayer { optional string mediaType = 3; } +// ImageLayerData contains metadata about an image layer. +message ImageLayerData { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + optional int64 size = 1; + + // MediaType of the referenced object. 
+ optional string mediaType = 2; +} + // ImageList is a list of Image objects. message ImageList { // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -237,6 +261,20 @@ message ImageStreamImportStatus { repeated ImageImportStatus images = 3; } +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +message ImageStreamLayers { + // Standard object's metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // blobs is a map of blob name to metadata about the blob. + map<string, ImageLayerData> blobs = 2; + + // images is a map between an image name and the names of the blobs and manifests that + // comprise the image. + map<string, ImageBlobReferences> images = 3; +} + // ImageStreamList is a list of ImageStream objects. message ImageStreamList { // Standard object's metadata. diff --git a/api/swagger-spec/openshift-openapi-spec.json b/api/swagger-spec/openshift-openapi-spec.json index 55e18bda038b..58b5c5564881 100644 --- a/api/swagger-spec/openshift-openapi-spec.json +++ b/api/swagger-spec/openshift-openapi-spec.json @@ -59949,6 +59949,68 @@ } ] }, + "/apis/image.openshift.io/v1/namespaces/{namespace}/imagestreams/{name}/layers": { + "get": { + "description": "read layers of the specified ImageStream", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "imageOpenshiftIo_v1" + ], + "operationId": "readImageOpenshiftIoV1NamespacedImageStreamLayers", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageStreamLayers" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "image.openshift.io", + "kind": "ImageStreamLayers", + "version": "v1" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "name of the ImageStreamLayers", + "name": 
"name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "object name and auth scope, such as for teams and projects", + "name": "namespace", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, "/apis/image.openshift.io/v1/namespaces/{namespace}/imagestreams/{name}/secrets": { "get": { "description": "read secrets of the specified ImageStream", @@ -115518,6 +115580,22 @@ } ] }, + "com.github.openshift.api.image.v1.ImageBlobReferences": { + "description": "ImageBlobReferences describes the blob references within an image.", + "properties": { + "layers": { + "description": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "type": "array", + "items": { + "type": "string" + } + }, + "manifest": { + "description": "manifest, if set, is the blob that contains the image manifest. Some images do not have separate manifest blobs and this field will be set to nil if so.", + "type": "string" + } + } + }, "com.github.openshift.api.image.v1.ImageImportSpec": { "description": "ImageImportSpec describes a request to import a specific image.", "required": [ @@ -115589,6 +115667,24 @@ } } }, + "com.github.openshift.api.image.v1.ImageLayerData": { + "description": "ImageLayerData contains metadata about an image layer.", + "required": [ + "size", + "mediaType" + ], + "properties": { + "mediaType": { + "description": "MediaType of the referenced object.", + "type": "string" + }, + "size": { + "description": "Size of the layer in bytes as defined by the underlying store. 
This field is optional if the necessary information about size is not available.", + "type": "integer", + "format": "int64" + } + } + }, "com.github.openshift.api.image.v1.ImageList": { "description": "ImageList is a list of Image objects.", "required": [ @@ -115874,6 +115970,48 @@ } } }, + "com.github.openshift.api.image.v1.ImageStreamLayers": { + "description": "ImageStreamLayers describes information about the layers referenced by images in this image stream.", + "required": [ + "blobs", + "images" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "type": "string" + }, + "blobs": { + "description": "blobs is a map of blob name to metadata about the blob.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageLayerData" + } + }, + "images": { + "description": "images is a map between an image name and the names of the blobs and manifests that comprise the image.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/com.github.openshift.api.image.v1.ImageBlobReferences" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "image.openshift.io", + "kind": "ImageStreamLayers", + "version": "v1" + } + ] + }, "com.github.openshift.api.image.v1.ImageStreamList": { "description": "ImageStreamList is a list of ImageStream objects.", "required": [ diff --git a/glide.lock b/glide.lock index 210531561003..40bb378ae03a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: df63691eefa0455af08310071c1457c268b7d58f2ceb813c0c29d06ec75a5d3d -updated: 2018-07-06T14:12:33.305926721-04:00 +updated: 2018-07-10T19:25:18.202568838-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -809,7 +809,7 @@ imports: - go-selinux - go-selinux/label - name: github.com/openshift/api - version: 95626b0211df2ed778b4d68c3e9e61138677d315 + version: 04a26bf3b8d69c390642c5803fe4cfdb899112aa subpackages: - apps/v1 - authorization/v1 @@ -831,7 +831,7 @@ imports: - user/v1 - webconsole/v1 - name: github.com/openshift/client-go - version: 4688ad28de2e88110c0ea30179c51b9b205f99be + version: b22949ca0eae046da7611d0d20c8089dcec212d6 subpackages: - apps/clientset/versioned - apps/clientset/versioned/scheme @@ -938,7 +938,7 @@ imports: - user/informers/externalversions/user/v1 - user/listers/user/v1 - name: github.com/openshift/imagebuilder - version: 4670fc31fca6121da0dcf42d6b9b298479c1715a + version: bfc0aea02ce95dcfc1a83d452270dbc66933709b subpackages: - dockerclient - imageprogress @@ -957,7 +957,7 @@ imports: - pkg/operator/resource/resourcemerge - pkg/serviceability - name: github.com/openshift/service-serving-cert-signer - version: 4e91b73f7394df0fffb885c35ac9b517cc385d2e + version: 10d530a76d1e98ae342df2fa5a3cbefd5dc3f241 subpackages: - 
pkg/controller/servingcert - pkg/controller/servingcert/cryptoextensions @@ -1827,7 +1827,7 @@ imports: - pkg/util/proto/testing - pkg/util/proto/validation - name: k8s.io/kubernetes - version: 5200705703cbf4abe250da3822f67e244e11d5b0 + version: aebe0498deb85f935a0126a4859b2a7610bd69c5 repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/controller-manager/app diff --git a/pkg/cmd/server/origin/legacy.go b/pkg/cmd/server/origin/legacy.go index 8372c0c75964..d44ab07ca9fe 100644 --- a/pkg/cmd/server/origin/legacy.go +++ b/pkg/cmd/server/origin/legacy.go @@ -207,6 +207,8 @@ func LegacyStorage(storage map[schema.GroupVersion]map[string]rest.Storage) map[ case *imagestreametcd.REST: legacyStorage[resource] = &imagestreametcd.LegacyREST{REST: storage} + case *imagestreametcd.LayersREST: + delete(legacyStorage, resource) case *routeetcd.REST: store := *storage.Store diff --git a/pkg/image/apis/image/register.go b/pkg/image/apis/image/register.go index 3f7820dfb7b1..86333f0ffbae 100644 --- a/pkg/image/apis/image/register.go +++ b/pkg/image/apis/image/register.go @@ -72,6 +72,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageStreamTag{}, &ImageStreamTagList{}, &ImageStreamImage{}, + &ImageStreamLayers{}, &ImageStreamImport{}, &kapi.SecretList{}, ) diff --git a/pkg/image/apis/image/types.go b/pkg/image/apis/image/types.go index 188d0d0afc39..2e564ed006e3 100644 --- a/pkg/image/apis/image/types.go +++ b/pkg/image/apis/image/types.go @@ -469,6 +469,43 @@ type ImageStreamImage struct { // DockerImageReference points to a Docker image. type DockerImageReference = reference.DockerImageReference +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +type ImageStreamLayers struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + // blobs is a map of blob name to metadata about the blob. 
+ Blobs map[string]ImageLayerData + // images is a map between an image name and the names of the blobs and manifests that + // comprise the image. + Images map[string]ImageBlobReferences +} + +// ImageBlobReferences describes the blob references within an image. +type ImageBlobReferences struct { + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + Layers []string + // manifest, if set, is the blob that contains the image manifest. Some images do + // not have separate manifest blobs and this field will be set to nil if so. + // +optional + Manifest *string +} + +// ImageLayerData contains metadata about an image layer. +type ImageLayerData struct { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + LayerSize *int64 + // MediaType of the referenced object. 
+ MediaType string +} + // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/image/apis/image/v1/zz_generated.conversion.go b/pkg/image/apis/image/v1/zz_generated.conversion.go index 55d53f661974..35b3d5f7668a 100644 --- a/pkg/image/apis/image/v1/zz_generated.conversion.go +++ b/pkg/image/apis/image/v1/zz_generated.conversion.go @@ -27,12 +27,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( Convert_v1_Image_To_image_Image, Convert_image_Image_To_v1_Image, + Convert_v1_ImageBlobReferences_To_image_ImageBlobReferences, + Convert_image_ImageBlobReferences_To_v1_ImageBlobReferences, Convert_v1_ImageImportSpec_To_image_ImageImportSpec, Convert_image_ImageImportSpec_To_v1_ImageImportSpec, Convert_v1_ImageImportStatus_To_image_ImageImportStatus, Convert_image_ImageImportStatus_To_v1_ImageImportStatus, Convert_v1_ImageLayer_To_image_ImageLayer, Convert_image_ImageLayer_To_v1_ImageLayer, + Convert_v1_ImageLayerData_To_image_ImageLayerData, + Convert_image_ImageLayerData_To_v1_ImageLayerData, Convert_v1_ImageList_To_image_ImageList, Convert_image_ImageList_To_v1_ImageList, Convert_v1_ImageLookupPolicy_To_image_ImageLookupPolicy, @@ -49,6 +53,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_image_ImageStreamImportSpec_To_v1_ImageStreamImportSpec, Convert_v1_ImageStreamImportStatus_To_image_ImageStreamImportStatus, Convert_image_ImageStreamImportStatus_To_v1_ImageStreamImportStatus, + Convert_v1_ImageStreamLayers_To_image_ImageStreamLayers, + Convert_image_ImageStreamLayers_To_v1_ImageStreamLayers, Convert_v1_ImageStreamList_To_image_ImageStreamList, Convert_image_ImageStreamList_To_v1_ImageStreamList, Convert_v1_ImageStreamMapping_To_image_ImageStreamMapping, @@ -120,6 +126,28 @@ func autoConvert_image_Image_To_v1_Image(in *image.Image, out *v1.Image, s conve return nil } +func 
autoConvert_v1_ImageBlobReferences_To_image_ImageBlobReferences(in *v1.ImageBlobReferences, out *image.ImageBlobReferences, s conversion.Scope) error { + out.Layers = *(*[]string)(unsafe.Pointer(&in.Layers)) + out.Manifest = (*string)(unsafe.Pointer(in.Manifest)) + return nil +} + +// Convert_v1_ImageBlobReferences_To_image_ImageBlobReferences is an autogenerated conversion function. +func Convert_v1_ImageBlobReferences_To_image_ImageBlobReferences(in *v1.ImageBlobReferences, out *image.ImageBlobReferences, s conversion.Scope) error { + return autoConvert_v1_ImageBlobReferences_To_image_ImageBlobReferences(in, out, s) +} + +func autoConvert_image_ImageBlobReferences_To_v1_ImageBlobReferences(in *image.ImageBlobReferences, out *v1.ImageBlobReferences, s conversion.Scope) error { + out.Layers = *(*[]string)(unsafe.Pointer(&in.Layers)) + out.Manifest = (*string)(unsafe.Pointer(in.Manifest)) + return nil +} + +// Convert_image_ImageBlobReferences_To_v1_ImageBlobReferences is an autogenerated conversion function. 
+func Convert_image_ImageBlobReferences_To_v1_ImageBlobReferences(in *image.ImageBlobReferences, out *v1.ImageBlobReferences, s conversion.Scope) error { + return autoConvert_image_ImageBlobReferences_To_v1_ImageBlobReferences(in, out, s) +} + func autoConvert_v1_ImageImportSpec_To_image_ImageImportSpec(in *v1.ImageImportSpec, out *image.ImageImportSpec, s conversion.Scope) error { if err := core_v1.Convert_v1_ObjectReference_To_core_ObjectReference(&in.From, &out.From, s); err != nil { return err @@ -240,6 +268,28 @@ func Convert_image_ImageLayer_To_v1_ImageLayer(in *image.ImageLayer, out *v1.Ima return autoConvert_image_ImageLayer_To_v1_ImageLayer(in, out, s) } +func autoConvert_v1_ImageLayerData_To_image_ImageLayerData(in *v1.ImageLayerData, out *image.ImageLayerData, s conversion.Scope) error { + out.LayerSize = (*int64)(unsafe.Pointer(in.LayerSize)) + out.MediaType = in.MediaType + return nil +} + +// Convert_v1_ImageLayerData_To_image_ImageLayerData is an autogenerated conversion function. +func Convert_v1_ImageLayerData_To_image_ImageLayerData(in *v1.ImageLayerData, out *image.ImageLayerData, s conversion.Scope) error { + return autoConvert_v1_ImageLayerData_To_image_ImageLayerData(in, out, s) +} + +func autoConvert_image_ImageLayerData_To_v1_ImageLayerData(in *image.ImageLayerData, out *v1.ImageLayerData, s conversion.Scope) error { + out.LayerSize = (*int64)(unsafe.Pointer(in.LayerSize)) + out.MediaType = in.MediaType + return nil +} + +// Convert_image_ImageLayerData_To_v1_ImageLayerData is an autogenerated conversion function. 
+func Convert_image_ImageLayerData_To_v1_ImageLayerData(in *image.ImageLayerData, out *v1.ImageLayerData, s conversion.Scope) error { + return autoConvert_image_ImageLayerData_To_v1_ImageLayerData(in, out, s) +} + func autoConvert_v1_ImageList_To_image_ImageList(in *v1.ImageList, out *image.ImageList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -564,6 +614,30 @@ func Convert_image_ImageStreamImportStatus_To_v1_ImageStreamImportStatus(in *ima return autoConvert_image_ImageStreamImportStatus_To_v1_ImageStreamImportStatus(in, out, s) } +func autoConvert_v1_ImageStreamLayers_To_image_ImageStreamLayers(in *v1.ImageStreamLayers, out *image.ImageStreamLayers, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Blobs = *(*map[string]image.ImageLayerData)(unsafe.Pointer(&in.Blobs)) + out.Images = *(*map[string]image.ImageBlobReferences)(unsafe.Pointer(&in.Images)) + return nil +} + +// Convert_v1_ImageStreamLayers_To_image_ImageStreamLayers is an autogenerated conversion function. +func Convert_v1_ImageStreamLayers_To_image_ImageStreamLayers(in *v1.ImageStreamLayers, out *image.ImageStreamLayers, s conversion.Scope) error { + return autoConvert_v1_ImageStreamLayers_To_image_ImageStreamLayers(in, out, s) +} + +func autoConvert_image_ImageStreamLayers_To_v1_ImageStreamLayers(in *image.ImageStreamLayers, out *v1.ImageStreamLayers, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Blobs = *(*map[string]v1.ImageLayerData)(unsafe.Pointer(&in.Blobs)) + out.Images = *(*map[string]v1.ImageBlobReferences)(unsafe.Pointer(&in.Images)) + return nil +} + +// Convert_image_ImageStreamLayers_To_v1_ImageStreamLayers is an autogenerated conversion function. 
+func Convert_image_ImageStreamLayers_To_v1_ImageStreamLayers(in *image.ImageStreamLayers, out *v1.ImageStreamLayers, s conversion.Scope) error { + return autoConvert_image_ImageStreamLayers_To_v1_ImageStreamLayers(in, out, s) +} + func autoConvert_v1_ImageStreamList_To_image_ImageStreamList(in *v1.ImageStreamList, out *image.ImageStreamList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { diff --git a/pkg/image/apis/image/zz_generated.deepcopy.go b/pkg/image/apis/image/zz_generated.deepcopy.go index 0f6a00114cdc..d8e4d7896f9b 100644 --- a/pkg/image/apis/image/zz_generated.deepcopy.go +++ b/pkg/image/apis/image/zz_generated.deepcopy.go @@ -376,6 +376,36 @@ func (in *Image) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Manifest != nil { + in, out := &in.Manifest, &out.Manifest + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. +func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { *out = *in @@ -446,6 +476,31 @@ func (in *ImageLayer) DeepCopy() *ImageLayer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. +func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageList) DeepCopyInto(out *ImageList) { *out = *in @@ -722,6 +777,50 @@ func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + newVal := new(ImageLayerData) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + newVal := new(ImageBlobReferences) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { *out = *in diff --git a/pkg/image/apiserver/apiserver.go b/pkg/image/apiserver/apiserver.go index a37f4e62ec76..414147ef0a86 100644 --- a/pkg/image/apiserver/apiserver.go +++ b/pkg/image/apiserver/apiserver.go @@ -56,6 +56,7 @@ type ExtraConfig struct { makeV1Storage sync.Once v1Storage map[string]rest.Storage v1StorageErr error + startFns []func(<-chan struct{}) } type ImageAPIServerConfig struct { @@ -109,6 +110,15 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) return nil, err } + if err := s.GenericAPIServer.AddPostStartHook("image.openshift.io-apiserver-caches", func(context genericapiserver.PostStartHookContext) error { + for _, fn := range c.ExtraConfig.startFns { + go fn(context.StopCh) + } + return nil + }); err != nil { + return nil, err + } + return s, nil } @@ -193,10 +203,13 @@ func (c *completedConfig) newV1RESTStorage() (map[string]rest.Storage, error) { whitelister = whitelist.WhitelistAllRegistries() } + imageLayerIndex := imagestreametcd.NewImageLayerIndex(imageV1Client.Image().Images()) + c.ExtraConfig.startFns = append(c.ExtraConfig.startFns, imageLayerIndex.Run) + imageRegistry := image.NewRegistry(imageStorage) imageSignatureStorage := imagesignature.NewREST(imageClient.Image()) imageStreamSecretsStorage := imagesecret.NewREST(coreClient) - imageStreamStorage, imageStreamStatusStorage, internalImageStreamStorage, err := imagestreametcd.NewREST(c.GenericConfig.RESTOptionsGetter, c.ExtraConfig.RegistryHostnameRetriever, authorizationClient.SubjectAccessReviews(), c.ExtraConfig.LimitVerifier, whitelister) + imageStreamStorage, imageStreamLayersStorage, imageStreamStatusStorage, internalImageStreamStorage, 
err := imagestreametcd.NewREST(c.GenericConfig.RESTOptionsGetter, c.ExtraConfig.RegistryHostnameRetriever, authorizationClient.SubjectAccessReviews(), c.ExtraConfig.LimitVerifier, whitelister, imageLayerIndex) if err != nil { return nil, fmt.Errorf("error building REST storage: %v", err) } @@ -231,6 +244,7 @@ func (c *completedConfig) newV1RESTStorage() (map[string]rest.Storage, error) { v1Storage["imagesignatures"] = imageSignatureStorage v1Storage["imageStreams/secrets"] = imageStreamSecretsStorage v1Storage["imageStreams"] = imageStreamStorage + v1Storage["imageStreams/layers"] = imageStreamLayersStorage v1Storage["imageStreams/status"] = imageStreamStatusStorage v1Storage["imageStreamImports"] = imageStreamImportStorage v1Storage["imageStreamImages"] = imageStreamImageStorage diff --git a/pkg/image/registry/imagestream/etcd/etcd.go b/pkg/image/registry/imagestream/etcd/etcd.go index cd65f6bc4e1f..f3fa49203a49 100644 --- a/pkg/image/registry/imagestream/etcd/etcd.go +++ b/pkg/image/registry/imagestream/etcd/etcd.go @@ -3,6 +3,7 @@ package etcd import ( "context" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" @@ -47,7 +48,8 @@ func NewREST( subjectAccessReviewRegistry authorizationclient.SubjectAccessReviewInterface, limitVerifier imageadmission.LimitVerifier, registryWhitelister whitelist.RegistryWhitelister, -) (*REST, *StatusREST, *InternalREST, error) { + imageLayerIndex ImageLayerIndex, +) (*REST, *LayersREST, *StatusREST, *InternalREST, error) { store := registry.Store{ NewFunc: func() runtime.Object { return &imageapi.ImageStream{} }, NewListFunc: func() runtime.Object { return &imageapi.ImageStreamList{} }, @@ -72,9 +74,11 @@ func NewREST( AttrFunc: storage.AttrFunc(storage.DefaultNamespaceScopedAttr).WithFieldMutation(imageapi.ImageStreamSelector), } if err := store.CompleteWithOptions(options); err != nil { - return nil, nil, nil, err + return 
nil, nil, nil, nil, err } + + layersREST := &LayersREST{index: imageLayerIndex, store: &store} + statusStrategy := imagestream.NewStatusStrategy(strategy) statusStore := store statusStore.Decorator = nil @@ -89,7 +93,7 @@ func NewREST( internalStore.UpdateStrategy = internalStrategy internalREST := &InternalREST{store: &internalStore} - return rest, statusREST, internalREST, nil + return rest, layersREST, statusREST, internalREST, nil } // StatusREST implements the REST endpoint for changing the status of an image stream. @@ -139,6 +143,72 @@ func (r *InternalREST) Update(ctx context.Context, name string, objInfo rest.Upd return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } +// LayersREST implements the read-only REST endpoint for retrieving the layers referenced by an image stream. +type LayersREST struct { + store *registry.Store + index ImageLayerIndex +} + +var _ rest.Getter = &LayersREST{} + +func (r *LayersREST) New() runtime.Object { + return &imageapi.ImageStreamLayers{} +} + +// Get returns the layers for an image stream. 
+func (r *LayersREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + if !r.index.HasSynced() { + return nil, errors.NewServerTimeout(r.store.DefaultQualifiedResource, "get", 2) + } + obj, err := r.store.Get(ctx, name, options) + if err != nil { + return nil, err + } + is := obj.(*imageapi.ImageStream) + isl := &imageapi.ImageStreamLayers{ + ObjectMeta: is.ObjectMeta, + Blobs: make(map[string]imageapi.ImageLayerData), + Images: make(map[string]imageapi.ImageBlobReferences), + } + + for _, status := range is.Status.Tags { + for _, item := range status.Items { + if len(item.Image) == 0 { + continue + } + + obj, _, _ := r.index.GetByKey(item.Image) + entry, ok := obj.(*ImageLayers) + if !ok { + continue + } + + if _, ok := isl.Images[item.Image]; !ok { + var reference imageapi.ImageBlobReferences + for _, layer := range entry.Layers { + reference.Layers = append(reference.Layers, layer.Name) + if _, ok := isl.Blobs[layer.Name]; !ok { + isl.Blobs[layer.Name] = imageapi.ImageLayerData{LayerSize: func(v int64) *int64 { return &v }(layer.LayerSize), MediaType: layer.MediaType} // copy the size; &layer.LayerSize would alias the reused loop variable + } + } + if blob := entry.Manifest; blob != nil { + reference.Manifest = &blob.Name + if _, ok := isl.Blobs[blob.Name]; !ok { + if blob.LayerSize == 0 { + // only send media type since we don't know the size of the manifest + isl.Blobs[blob.Name] = imageapi.ImageLayerData{MediaType: blob.MediaType} + } else { + isl.Blobs[blob.Name] = imageapi.ImageLayerData{LayerSize: &blob.LayerSize, MediaType: blob.MediaType} + } + } + } + isl.Images[item.Image] = reference + } + } + } + return isl, nil +} + // LegacyREST allows us to wrap and alter some behavior type LegacyREST struct { *REST diff --git a/pkg/image/registry/imagestream/etcd/etcd_test.go b/pkg/image/registry/imagestream/etcd/etcd_test.go index 35a18abac2be..ee7d5f2a2afb 100644 --- a/pkg/image/registry/imagestream/etcd/etcd_test.go +++ b/pkg/image/registry/imagestream/etcd/etcd_test.go @@ -53,12 +53,14 @@ func newStorage(t *testing.T) 
(*REST, *StatusREST, *InternalREST, *etcdtesting.E server, etcdStorage := etcdtesting.NewUnsecuredEtcd3TestClientServer(t) etcdStorage.Codec = legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: "image.openshift.io", Version: "v1"}) registry := imageapi.DefaultRegistryHostnameRetriever(noDefaultRegistry, "", "") - imageStorage, statusStorage, internalStorage, err := NewREST( + imageStorage, _, statusStorage, internalStorage, err := NewREST( restoptions.NewSimpleGetter(etcdStorage), registry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, - &fake.RegistryWhitelister{}) + &fake.RegistryWhitelister{}, + NewEmptyLayerIndex(), + ) if err != nil { t.Fatal(err) } diff --git a/pkg/image/registry/imagestream/etcd/image.go b/pkg/image/registry/imagestream/etcd/image.go new file mode 100644 index 000000000000..704371382bab --- /dev/null +++ b/pkg/image/registry/imagestream/etcd/image.go @@ -0,0 +1,225 @@ +package etcd + +import ( + "fmt" + + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/api/legacyscheme" + + imagev1 "github.com/openshift/api/image/v1" + imageapi "github.com/openshift/origin/pkg/image/apis/image" +) + +// ImageLayerIndex is a cache of image digests to the layers they contain. +// Because a very large number of images can exist on a cluster, we only +// hold in memory a small subset of the full image object. 
+type ImageLayerIndex interface { + HasSynced() bool + GetByKey(key string) (item interface{}, exists bool, err error) + Run(stopCh <-chan struct{}) +} + +type ImageListWatch interface { + List(metav1.ListOptions) (*imagev1.ImageList, error) + Watch(metav1.ListOptions) (watch.Interface, error) +} + +type imageLayerIndex struct { + informer cache.SharedIndexInformer +} + +func NewEmptyLayerIndex() ImageLayerIndex { + return imageLayerIndex{} +} + +func (i imageLayerIndex) HasSynced() bool { + if i.informer == nil { + return true + } + return i.informer.HasSynced() +} +func (i imageLayerIndex) GetByKey(key string) (item interface{}, exists bool, err error) { + if i.informer == nil { + return nil, false, nil + } + return i.informer.GetStore().GetByKey(key) +} +func (i imageLayerIndex) Run(stopCh <-chan struct{}) { + if i.informer == nil { + return + } + i.informer.Run(stopCh) +} + +// NewImageLayerIndex creates a new index over a store that must return +// images. +func NewImageLayerIndex(lw ImageListWatch) ImageLayerIndex { + informer := cache.NewSharedIndexInformer(&cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + list, err := lw.List(metav1.ListOptions{ + ResourceVersion: options.ResourceVersion, + Limit: options.Limit, + Continue: options.Continue, + }) + if err != nil { + return nil, err + } + // reduce the full image list to a smaller subset. 
+ out := &metainternalversion.List{ + ListMeta: metav1.ListMeta{ + Continue: list.Continue, + ResourceVersion: list.ResourceVersion, + }, + Items: make([]runtime.Object, len(list.Items)), + } + for i, image := range list.Items { + out.Items[i] = imageLayersForImage(&image) + } + return out, nil + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + w, err := lw.Watch(metav1.ListOptions{ + ResourceVersion: options.ResourceVersion, + }) + if err != nil { + return nil, err + } + return watch.Filter(w, func(in watch.Event) (out watch.Event, keep bool) { + if in.Object == nil { + return in, true + } + // reduce each object to the minimal subset we need for the cache + image, ok := in.Object.(*imagev1.Image) + if !ok { + return in, true + } + in.Object = imageLayersForImage(image) + return in, true + }), nil + }, + }, &ImageLayers{}, 0, cache.Indexers{ + // layers allows fast access to the images with a given layer + "layers": func(obj interface{}) ([]string, error) { + entry, ok := obj.(*ImageLayers) + if !ok { + return nil, fmt.Errorf("unexpected cache object %T", obj) + } + keys := make([]string, 0, len(entry.Layers)) + for _, layer := range entry.Layers { + keys = append(keys, layer.Name) + } + return keys, nil + }, + }) + return imageLayerIndex{informer: informer} +} + +// manifestFromImage attempts to find a manifest blob description from +// an image. Images older than schema2 in Docker do not have a manifest blob. 
+func manifestFromImage(image *imagev1.Image) *imagev1.ImageLayer { + if image.DockerImageManifestMediaType != "application/vnd.docker.distribution.manifest.v2+json" { + return nil + } + meta := &imageapi.DockerImage{} + if _, _, err := legacyscheme.Codecs.UniversalDecoder().Decode(image.DockerImageMetadata.Raw, nil, meta); err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to decode image for layer cache: %v", err)) + return nil + } + return &imagev1.ImageLayer{ + Name: meta.ID, + MediaType: image.DockerImageManifestMediaType, + } +} + +// ImageLayers is the minimal set of data we need to retain to provide the cache. +// Unlike a more general informer cache, we do not retain the full object because of +// the potential size of the objects being stored. Even a small cluster may have 20k +// or more images in active use. +type ImageLayers struct { + Name string + ResourceVersion string + Manifest *imagev1.ImageLayer + Layers []imagev1.ImageLayer +} + +func imageLayersForImage(image *imagev1.Image) *ImageLayers { + return &ImageLayers{ + Name: image.Name, + ResourceVersion: image.ResourceVersion, + Layers: image.DockerImageLayers, + Manifest: manifestFromImage(image), + } +} + +var ( + _ runtime.Object = &ImageLayers{} + _ metav1.Object = &ImageLayers{} +) + +func (l *ImageLayers) GetObjectKind() schema.ObjectKind { return &metav1.TypeMeta{} } +func (l *ImageLayers) DeepCopyObject() runtime.Object { + var layers []imagev1.ImageLayer + if l.Layers != nil { + layers = make([]imagev1.ImageLayer, len(l.Layers)) + copy(layers, l.Layers) + } + var manifest *imagev1.ImageLayer + if l.Manifest != nil { + copied := *l.Manifest + manifest = &copied + } + return &ImageLayers{ + Name: l.Name, + ResourceVersion: l.ResourceVersion, + Manifest: manifest, + Layers: layers, + } +} + +// client-go/cache.SharedIndexInformer hardcodes the key function to assume ObjectMeta. +// Here we implement the relevant accessors to allow a minimal index to be created. 
+// SharedIndexInformer will be refactored to require a more minimal subset of actions +// in the near future. + +func (l *ImageLayers) GetName() string { return l.Name } +func (l *ImageLayers) GetNamespace() string { return "" } +func (l *ImageLayers) GetResourceVersion() string { return l.ResourceVersion } +func (l *ImageLayers) SetResourceVersion(version string) { l.ResourceVersion = version } + +// These methods are unused stubs to satisfy meta.Object. + +func (l *ImageLayers) SetNamespace(namespace string) {} +func (l *ImageLayers) SetName(name string) {} +func (l *ImageLayers) GetGenerateName() string { return "" } +func (l *ImageLayers) SetGenerateName(name string) {} +func (l *ImageLayers) GetUID() types.UID { return "" } +func (l *ImageLayers) SetUID(uid types.UID) {} +func (l *ImageLayers) GetGeneration() int64 { return 0 } +func (l *ImageLayers) SetGeneration(generation int64) {} +func (l *ImageLayers) GetSelfLink() string { return "" } +func (l *ImageLayers) SetSelfLink(selfLink string) {} +func (l *ImageLayers) GetCreationTimestamp() metav1.Time { return metav1.Time{} } +func (l *ImageLayers) SetCreationTimestamp(timestamp metav1.Time) {} +func (l *ImageLayers) GetDeletionTimestamp() *metav1.Time { return nil } +func (l *ImageLayers) SetDeletionTimestamp(timestamp *metav1.Time) {} +func (l *ImageLayers) GetDeletionGracePeriodSeconds() *int64 { return nil } +func (l *ImageLayers) SetDeletionGracePeriodSeconds(*int64) {} +func (l *ImageLayers) GetLabels() map[string]string { return nil } +func (l *ImageLayers) SetLabels(labels map[string]string) {} +func (l *ImageLayers) GetAnnotations() map[string]string { return nil } +func (l *ImageLayers) SetAnnotations(annotations map[string]string) {} +func (l *ImageLayers) GetInitializers() *metav1.Initializers { return nil } +func (l *ImageLayers) SetInitializers(initializers *metav1.Initializers) {} +func (l *ImageLayers) GetFinalizers() []string { return nil } +func (l *ImageLayers) SetFinalizers(finalizers 
[]string) {} +func (l *ImageLayers) GetOwnerReferences() []metav1.OwnerReference { return nil } +func (l *ImageLayers) SetOwnerReferences([]metav1.OwnerReference) {} +func (l *ImageLayers) GetClusterName() string { return "" } +func (l *ImageLayers) SetClusterName(clusterName string) {} diff --git a/pkg/image/registry/imagestreamimage/rest_test.go b/pkg/image/registry/imagestreamimage/rest_test.go index c7fab2074fcb..f40dd268d458 100644 --- a/pkg/image/registry/imagestreamimage/rest_test.go +++ b/pkg/image/registry/imagestreamimage/rest_test.go @@ -47,7 +47,7 @@ func setup(t *testing.T) (etcd.KV, *etcdtesting.EtcdTestServer, *REST) { t.Fatal(err) } defaultRegistry := imageapi.DefaultRegistryHostnameRetriever(testDefaultRegistry, "", "") - imageStreamStorage, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST(restoptions.NewSimpleGetter(etcdStorage), defaultRegistry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, &fake.RegistryWhitelister{}) + imageStreamStorage, _, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST(restoptions.NewSimpleGetter(etcdStorage), defaultRegistry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, &fake.RegistryWhitelister{}, imagestreametcd.NewEmptyLayerIndex()) if err != nil { t.Fatal(err) } diff --git a/pkg/image/registry/imagestreammapping/rest_test.go b/pkg/image/registry/imagestreammapping/rest_test.go index 2c6ac5bad643..0a7fa28dd290 100644 --- a/pkg/image/registry/imagestreammapping/rest_test.go +++ b/pkg/image/registry/imagestreammapping/rest_test.go @@ -59,7 +59,7 @@ func setup(t *testing.T) (etcd.KV, *etcdtesting.EtcdTestServer, *REST) { t.Fatal(err) } registry := imageapi.DefaultRegistryHostnameRetriever(testDefaultRegistry, "", "") - imageStreamStorage, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST(restoptions.NewSimpleGetter(etcdStorage), registry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, 
&fake.RegistryWhitelister{}) + imageStreamStorage, _, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST(restoptions.NewSimpleGetter(etcdStorage), registry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, &fake.RegistryWhitelister{}, imagestreametcd.NewEmptyLayerIndex()) if err != nil { t.Fatal(err) } diff --git a/pkg/image/registry/imagestreamtag/rest_test.go b/pkg/image/registry/imagestreamtag/rest_test.go index 0b85b6e347ed..b56899582f74 100644 --- a/pkg/image/registry/imagestreamtag/rest_test.go +++ b/pkg/image/registry/imagestreamtag/rest_test.go @@ -75,12 +75,14 @@ func setup(t *testing.T) (etcd.KV, *etcdtesting.EtcdTestServer, *REST) { t.Fatal(err) } registry := imageapi.DefaultRegistryHostnameRetriever(testDefaultRegistry, "", "") - imageStreamStorage, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST( + imageStreamStorage, _, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST( restoptions.NewSimpleGetter(etcdStorage), registry, &fakeSubjectAccessReviewRegistry{}, &admfake.ImageStreamLimitVerifier{}, - rw) + rw, + imagestreametcd.NewEmptyLayerIndex(), + ) if err != nil { t.Fatal(err) } diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 8a1b2fa35633..f56dfc230c0f 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -131,9 +131,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/config/v1.ServingInfo": schema_openshift_api_config_v1_ServingInfo(ref), "github.com/openshift/api/image/v1.DockerImageReference": schema_openshift_api_image_v1_DockerImageReference(ref), "github.com/openshift/api/image/v1.Image": schema_openshift_api_image_v1_Image(ref), + "github.com/openshift/api/image/v1.ImageBlobReferences": schema_openshift_api_image_v1_ImageBlobReferences(ref), "github.com/openshift/api/image/v1.ImageImportSpec": 
schema_openshift_api_image_v1_ImageImportSpec(ref), "github.com/openshift/api/image/v1.ImageImportStatus": schema_openshift_api_image_v1_ImageImportStatus(ref), "github.com/openshift/api/image/v1.ImageLayer": schema_openshift_api_image_v1_ImageLayer(ref), + "github.com/openshift/api/image/v1.ImageLayerData": schema_openshift_api_image_v1_ImageLayerData(ref), "github.com/openshift/api/image/v1.ImageList": schema_openshift_api_image_v1_ImageList(ref), "github.com/openshift/api/image/v1.ImageLookupPolicy": schema_openshift_api_image_v1_ImageLookupPolicy(ref), "github.com/openshift/api/image/v1.ImageSignature": schema_openshift_api_image_v1_ImageSignature(ref), @@ -142,6 +144,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/image/v1.ImageStreamImport": schema_openshift_api_image_v1_ImageStreamImport(ref), "github.com/openshift/api/image/v1.ImageStreamImportSpec": schema_openshift_api_image_v1_ImageStreamImportSpec(ref), "github.com/openshift/api/image/v1.ImageStreamImportStatus": schema_openshift_api_image_v1_ImageStreamImportStatus(ref), + "github.com/openshift/api/image/v1.ImageStreamLayers": schema_openshift_api_image_v1_ImageStreamLayers(ref), "github.com/openshift/api/image/v1.ImageStreamList": schema_openshift_api_image_v1_ImageStreamList(ref), "github.com/openshift/api/image/v1.ImageStreamMapping": schema_openshift_api_image_v1_ImageStreamMapping(ref), "github.com/openshift/api/image/v1.ImageStreamSpec": schema_openshift_api_image_v1_ImageStreamSpec(ref), @@ -6832,6 +6835,40 @@ func schema_openshift_api_image_v1_Image(ref common.ReferenceCallback) common.Op } } +func schema_openshift_api_image_v1_ImageBlobReferences(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageBlobReferences describes the blob references within an image.", + Properties: map[string]spec.Schema{ + "layers": { + 
SchemaProps: spec.SchemaProps{ + Description: "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "manifest": { + SchemaProps: spec.SchemaProps{ + Description: "manifest, if set, is the blob that contains the image manifest. Some images do not have separate manifest blobs and this field will be set to nil if so.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + } +} + func schema_openshift_api_image_v1_ImageImportSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6947,6 +6984,34 @@ func schema_openshift_api_image_v1_ImageLayer(ref common.ReferenceCallback) comm } } +func schema_openshift_api_image_v1_ImageLayerData(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageLayerData contains metadata about an image layer.", + Properties: map[string]spec.Schema{ + "size": { + SchemaProps: spec.SchemaProps{ + Description: "Size of the layer in bytes as defined by the underlying store. 
This field is optional if the necessary information about size is not available.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "mediaType": { + SchemaProps: spec.SchemaProps{ + Description: "MediaType of the referenced object.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"size", "mediaType"}, + }, + }, + Dependencies: []string{}, + } +} + func schema_openshift_api_image_v1_ImageList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -7338,6 +7403,67 @@ func schema_openshift_api_image_v1_ImageStreamImportStatus(ref common.ReferenceC } } +func schema_openshift_api_image_v1_ImageStreamLayers(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageStreamLayers describes information about the layers referenced by images in this image stream.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "blobs": { + SchemaProps: spec.SchemaProps{ + Description: "blobs is a map of blob name to metadata about the blob.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/image/v1.ImageLayerData"), + }, + }, + }, + }, + }, + "images": { + SchemaProps: spec.SchemaProps{ + Description: "images is a map between an image name and the names of the blobs and manifests that comprise the image.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/image/v1.ImageBlobReferences"), + }, + }, + }, + }, + }, + }, + Required: []string{"blobs", "images"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/image/v1.ImageBlobReferences", "github.com/openshift/api/image/v1.ImageLayerData", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + func schema_openshift_api_image_v1_ImageStreamList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/test/extended/images/layers.go b/test/extended/images/layers.go new file mode 100644 index 000000000000..a19d67c6df74 --- /dev/null +++ b/test/extended/images/layers.go @@ -0,0 +1,181 @@ +package images + +import ( + "fmt" + "time" + + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + kapi "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + buildapi "github.com/openshift/api/build/v1" + imageapi 
"github.com/openshift/api/image/v1" + buildclientset "github.com/openshift/client-go/build/clientset/versioned" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" + exutil "github.com/openshift/origin/test/extended/util" +) + +var _ = g.Describe("[Feature:ImageLayers] Image layer subresource", func() { + defer g.GinkgoRecover() + var oc *exutil.CLI + var ns []string + + g.AfterEach(func() { + if g.CurrentGinkgoTestDescription().Failed { + for _, s := range ns { + exutil.DumpPodLogsStartingWithInNamespace("", s, oc) + } + } + }) + + oc = exutil.NewCLI("image-layers", exutil.KubeConfigPath()) + + g.It("should return layers from tagged images", func() { + ns = []string{oc.Namespace()} + client := imageclientset.NewForConfigOrDie(oc.UserConfig()).Image() + isi, err := client.ImageStreamImports(oc.Namespace()).Create(&imageapi.ImageStreamImport{ + ObjectMeta: metav1.ObjectMeta{ + Name: "1", + }, + Spec: imageapi.ImageStreamImportSpec{ + Import: true, + Images: []imageapi.ImageImportSpec{ + { + From: kapi.ObjectReference{Kind: "DockerImage", Name: "busybox:latest"}, + To: &kapi.LocalObjectReference{Name: "busybox"}, + }, + { + From: kapi.ObjectReference{Kind: "DockerImage", Name: "mysql:latest"}, + To: &kapi.LocalObjectReference{Name: "mysql"}, + }, + }, + }, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(isi.Status.Images).To(o.HaveLen(2)) + for _, image := range isi.Status.Images { + o.Expect(image.Image).ToNot(o.BeNil(), fmt.Sprintf("image %s %#v", image.Tag, image.Status)) + } + + // TODO: we may race here with the cache, if this is a problem, loop + g.By("verifying that layers for imported images are correct") + var busyboxLayers []string + for i := 0; ; i++ { + layers, err := client.ImageStreams(oc.Namespace()).Layers("1", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + for i, image := range isi.Status.Images { + l, ok := layers.Images[image.Image.Name] + o.Expect(ok).To(o.BeTrue()) + 
o.Expect(len(l.Layers)).To(o.BeNumerically(">", 0)) + o.Expect(l.Manifest).ToNot(o.BeNil()) + for _, layerID := range l.Layers { + o.Expect(layers.Blobs).To(o.HaveKey(layerID)) + o.Expect(layers.Blobs[layerID].MediaType).NotTo(o.BeEmpty()) + } + if i == 0 { + busyboxLayers = l.Layers + } + } + if len(busyboxLayers) > 0 { + break + } + time.Sleep(time.Second) + o.Expect(i).To(o.BeNumerically("<", 10), "Timed out waiting for layers to have expected data, got\n%#v\n%#v", layers, isi.Status.Images) + } + + _, err = client.ImageStreams(oc.Namespace()).Create(&imageapi.ImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: "output", + }, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + layers, err := client.ImageStreams(oc.Namespace()).Layers("output", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(layers.Images).To(o.BeEmpty()) + o.Expect(layers.Blobs).To(o.BeEmpty()) + + _, err = client.ImageStreams(oc.Namespace()).Layers("doesnotexist", metav1.GetOptions{}) + o.Expect(err).To(o.HaveOccurred()) + o.Expect(errors.IsNotFound(err)).To(o.BeTrue()) + + dockerfile := ` +FROM a +RUN mkdir -p /var/lib && echo "a" > /var/lib/file +` + + g.By("running a build based on our tagged layer") + buildClient := buildclientset.NewForConfigOrDie(oc.UserConfig()).Build() + _, err = buildClient.Builds(oc.Namespace()).Create(&buildapi.Build{ + ObjectMeta: metav1.ObjectMeta{ + Name: "output", + }, + Spec: buildapi.BuildSpec{ + CommonSpec: buildapi.CommonSpec{ + Source: buildapi.BuildSource{ + Dockerfile: &dockerfile, + }, + Strategy: buildapi.BuildStrategy{ + DockerStrategy: &buildapi.DockerBuildStrategy{ + From: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "1:busybox"}, + }, + }, + Output: buildapi.BuildOutput{ + To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "output:latest"}, + }, + }, + }, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + newNamespace := oc.CreateProject() + ns = append(ns, newNamespace) + + g.By("waiting for the build to finish") + var lastBuild 
*buildapi.Build + err = wait.Poll(time.Second, time.Minute, func() (bool, error) { + build, err := buildClient.Builds(oc.Namespace()).Get("output", metav1.GetOptions{}) + if err != nil { + return false, err + } + o.Expect(build.Status.Phase).NotTo(o.Or(o.Equal(buildapi.BuildPhaseFailed), o.Equal(buildapi.BuildPhaseError), o.Equal(buildapi.BuildPhaseCancelled))) + lastBuild = build + return build.Status.Phase == buildapi.BuildPhaseComplete, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("checking the layers for the built image") + layers, err = client.ImageStreams(oc.Namespace()).Layers("output", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + to := lastBuild.Status.Output.To + o.Expect(to).NotTo(o.BeNil()) + o.Expect(layers.Images).To(o.HaveKey(to.ImageDigest)) + builtImageLayers := layers.Images[to.ImageDigest] + o.Expect(len(builtImageLayers.Layers)).To(o.Equal(len(busyboxLayers)+1), fmt.Sprintf("%#v", layers.Images)) + for i := range busyboxLayers { + o.Expect(busyboxLayers[i]).To(o.Equal(builtImageLayers.Layers[i])) + } + + g.By("tagging the built image into another namespace") + _, err = client.ImageStreamTags(newNamespace).Create(&imageapi.ImageStreamTag{ + ObjectMeta: metav1.ObjectMeta{ + Name: "output:latest", + }, + Tag: &imageapi.TagReference{ + Name: "copied", + From: &kapi.ObjectReference{Kind: "ImageStreamTag", Namespace: oc.Namespace(), Name: "output:latest"}, + }, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("checking that the image shows up in the other namespace") + layers, err = client.ImageStreams(newNamespace).Layers("output", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(layers.Images).To(o.HaveKey(to.ImageDigest)) + o.Expect(layers.Images[to.ImageDigest]).To(o.Equal(builtImageLayers)) + }) +}) diff --git a/test/extended/util/cli.go b/test/extended/util/cli.go index 2f35772460d3..28b02f40cf1f 100644 --- a/test/extended/util/cli.go +++ b/test/extended/util/cli.go @@ -182,6 +182,32 @@ 
func (c *CLI) SetupProject() { o.Expect(err).NotTo(o.HaveOccurred()) } +// CreateProject creates a new project and assigns a random user to the project. +// All resources will be then created within this project. +func (c *CLI) CreateProject() string { + newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("e2e-test-%s-", c.kubeFramework.BaseName)) + e2e.Logf("Creating project %q", newNamespace) + _, err := c.ProjectClient().Project().ProjectRequests().Create(&projectapi.ProjectRequest{ + ObjectMeta: metav1.ObjectMeta{Name: newNamespace}, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + // TODO: remove when https://github.com/kubernetes/kubernetes/pull/62606 merges and is in origin + c.namespacesToDelete = append(c.namespacesToDelete, newNamespace) + + e2e.Logf("Waiting on permissions in project %q ...", newNamespace) + err = WaitForSelfSAR(1*time.Second, 60*time.Second, c.KubeClient(), authorizationapiv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapiv1.ResourceAttributes{ + Namespace: newNamespace, + Verb: "create", + Group: "", + Resource: "pods", + }, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + return newNamespace +} + // TeardownProject removes projects created by this test. 
func (c *CLI) TeardownProject() { if g.CurrentGinkgoTestDescription().Failed && e2e.TestContext.DumpLogsOnFailure { @@ -191,7 +217,7 @@ func (c *CLI) TeardownProject() { if len(c.configPath) > 0 { os.Remove(c.configPath) } - if len(c.namespacesToDelete) > 0 { + if e2e.TestContext.DeleteNamespace && len(c.namespacesToDelete) > 0 { timeout := e2e.DefaultNamespaceDeletionTimeout if c.kubeFramework.NamespaceDeletionTimeout != 0 { timeout = c.kubeFramework.NamespaceDeletionTimeout @@ -214,11 +240,7 @@ func (c *CLI) RESTMapper() meta.RESTMapper { } func (c *CLI) AppsClient() appsclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := appsclientset.NewForConfig(clientConfig) + client, err := appsclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -226,11 +248,7 @@ func (c *CLI) AppsClient() appsclientset.Interface { } func (c *CLI) AuthorizationClient() authorizationclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := authorizationclientset.NewForConfig(clientConfig) + client, err := authorizationclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -238,11 +256,7 @@ func (c *CLI) AuthorizationClient() authorizationclientset.Interface { } func (c *CLI) BuildClient() buildclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := buildclientset.NewForConfig(clientConfig) + client, err := buildclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -250,11 +264,7 @@ func (c *CLI) BuildClient() buildclientset.Interface { } func (c *CLI) ImageClient() imageclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := imageclientset.NewForConfig(clientConfig) + client, err 
:= imageclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -262,11 +272,7 @@ func (c *CLI) ImageClient() imageclientset.Interface { } func (c *CLI) ProjectClient() projectclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := projectclientset.NewForConfig(clientConfig) + client, err := projectclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -274,11 +280,7 @@ func (c *CLI) ProjectClient() projectclientset.Interface { } func (c *CLI) RouteClient() routeclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := routeclientset.NewForConfig(clientConfig) + client, err := routeclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -288,11 +290,7 @@ func (c *CLI) RouteClient() routeclientset.Interface { // Client provides an OpenShift client for the current user. 
If the user is not // set, then it provides client for the cluster admin user func (c *CLI) TemplateClient() templateclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := templateclientset.NewForConfig(clientConfig) + client, err := templateclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -300,11 +298,7 @@ func (c *CLI) TemplateClient() templateclientset.Interface { } func (c *CLI) UserClient() userclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - client, err := userclientset.NewForConfig(clientConfig) + client, err := userclientset.NewForConfig(c.UserConfig()) if err != nil { FatalErr(err) } @@ -312,11 +306,7 @@ func (c *CLI) UserClient() userclientset.Interface { } func (c *CLI) AdminAppsClient() appsclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := appsclientset.NewForConfig(clientConfig) + client, err := appsclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -324,11 +314,7 @@ func (c *CLI) AdminAppsClient() appsclientset.Interface { } func (c *CLI) AdminAuthorizationClient() authorizationclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := authorizationclientset.NewForConfig(clientConfig) + client, err := authorizationclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -336,11 +322,7 @@ func (c *CLI) AdminAuthorizationClient() authorizationclientset.Interface { } func (c *CLI) AdminBuildClient() buildclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := buildclientset.NewForConfig(clientConfig) + client, err := 
buildclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -348,11 +330,7 @@ func (c *CLI) AdminBuildClient() buildclientset.Interface { } func (c *CLI) AdminImageClient() imageclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := imageclientset.NewForConfig(clientConfig) + client, err := imageclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -360,11 +338,7 @@ func (c *CLI) AdminImageClient() imageclientset.Interface { } func (c *CLI) AdminProjectClient() projectclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := projectclientset.NewForConfig(clientConfig) + client, err := projectclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -372,11 +346,7 @@ func (c *CLI) AdminProjectClient() projectclientset.Interface { } func (c *CLI) AdminRouteClient() routeclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := routeclientset.NewForConfig(clientConfig) + client, err := routeclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -385,11 +355,7 @@ func (c *CLI) AdminRouteClient() routeclientset.Interface { // AdminClient provides an OpenShift client for the cluster admin user. 
func (c *CLI) AdminTemplateClient() templateclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := templateclientset.NewForConfig(clientConfig) + client, err := templateclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -397,11 +363,7 @@ func (c *CLI) AdminTemplateClient() templateclientset.Interface { } func (c *CLI) AdminUserClient() userclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := userclientset.NewForConfig(clientConfig) + client, err := userclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -409,11 +371,7 @@ func (c *CLI) AdminUserClient() userclientset.Interface { } func (c *CLI) AdminSecurityClient() securityclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) - if err != nil { - FatalErr(err) - } - client, err := securityclientset.NewForConfig(clientConfig) + client, err := securityclientset.NewForConfig(c.AdminConfig()) if err != nil { FatalErr(err) } @@ -422,20 +380,12 @@ func (c *CLI) AdminSecurityClient() securityclientset.Interface { // KubeClient provides a Kubernetes client for the current namespace func (c *CLI) KubeClient() kclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - return kclientset.NewForConfigOrDie(clientConfig) + return kclientset.NewForConfigOrDie(c.UserConfig()) } // KubeClient provides a Kubernetes client for the current namespace func (c *CLI) InternalKubeClient() kinternalclientset.Interface { - clientConfig, err := configapi.GetClientConfig(c.configPath, nil) - if err != nil { - FatalErr(err) - } - return kinternalclientset.NewForConfigOrDie(clientConfig) + return kinternalclientset.NewForConfigOrDie(c.UserConfig()) } // AdminKubeClient provides a Kubernetes client for 
the cluster admin user. @@ -448,6 +398,14 @@ func (c *CLI) InternalAdminKubeClient() kinternalclientset.Interface { return kinternalclientset.NewForConfigOrDie(c.AdminConfig()) } +func (c *CLI) UserConfig() *restclient.Config { + clientConfig, err := configapi.GetClientConfig(c.configPath, nil) + if err != nil { + FatalErr(err) + } + return clientConfig +} + func (c *CLI) AdminConfig() *restclient.Config { clientConfig, err := configapi.GetClientConfig(c.adminConfigPath, nil) if err != nil { diff --git a/test/integration/etcd_storage_path_test.go b/test/integration/etcd_storage_path_test.go index 68f764a96784..01d4bfaa19a3 100644 --- a/test/integration/etcd_storage_path_test.go +++ b/test/integration/etcd_storage_path_test.go @@ -879,6 +879,7 @@ var ephemeralWhiteList = createEphemeralWhiteList( gvr("image.openshift.io", "v1", "imagestreamimages"), // not stored in etcd gvr("", "v1", "imagestreammappings"), // not stored in etcd gvr("image.openshift.io", "v1", "imagestreammappings"), // not stored in etcd + gvr("image.openshift.io", "v1", "imagestreamlayerses"), // not stored in etcd // -- // github.com/openshift/origin/pkg/project/apis/project/v1 diff --git a/test/integration/master_routes_test.go b/test/integration/master_routes_test.go index baae70fcaa95..3276bfa69b22 100644 --- a/test/integration/master_routes_test.go +++ b/test/integration/master_routes_test.go @@ -117,6 +117,7 @@ var expectedIndex = []string{ "/healthz/poststarthook/bootstrap-controller", "/healthz/poststarthook/ca-registration", "/healthz/poststarthook/generic-apiserver-start-informers", + "/healthz/poststarthook/image.openshift.io-apiserver-caches", "/healthz/poststarthook/kube-apiserver-autoregistration", "/healthz/poststarthook/oauth.openshift.io-StartOAuthClientsBootstrapping", "/healthz/poststarthook/openshift.io-RESTMapper", diff --git a/vendor/github.com/openshift/api/authorization/install.go b/vendor/github.com/openshift/api/authorization/install.go index 2cb6e4369a49..08ecc95f49d7 
100644 --- a/vendor/github.com/openshift/api/authorization/install.go +++ b/vendor/github.com/openshift/api/authorization/install.go @@ -1,4 +1,4 @@ -package apps +package authorization import ( "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go index cf5b1a799525..cec6c04de6fd 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go @@ -11,9 +11,11 @@ It has these top-level messages: DockerImageReference Image + ImageBlobReferences ImageImportSpec ImageImportStatus ImageLayer + ImageLayerData ImageList ImageLookupPolicy ImageSignature @@ -22,6 +24,7 @@ ImageStreamImport ImageStreamImportSpec ImageStreamImportStatus + ImageStreamLayers ImageStreamList ImageStreamMapping ImageStreamSpec @@ -76,130 +79,144 @@ func (m *Image) Reset() { *m = Image{} } func (*Image) ProtoMessage() {} func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } +func (*ImageBlobReferences) ProtoMessage() {} +func (*ImageBlobReferences) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } func (*ImageImportSpec) ProtoMessage() {} -func (*ImageImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ImageImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } func (*ImageImportStatus) ProtoMessage() {} -func (*ImageImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ImageImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ImageLayer) Reset() { *m = ImageLayer{} } func (*ImageLayer) ProtoMessage() {} -func 
(*ImageLayer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*ImageLayer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } +func (*ImageLayerData) ProtoMessage() {} +func (*ImageLayerData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *ImageList) Reset() { *m = ImageList{} } func (*ImageList) ProtoMessage() {} -func (*ImageList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*ImageList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } func (*ImageLookupPolicy) ProtoMessage() {} -func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *ImageSignature) Reset() { *m = ImageSignature{} } func (*ImageSignature) ProtoMessage() {} -func (*ImageSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*ImageSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *ImageStream) Reset() { *m = ImageStream{} } func (*ImageStream) ProtoMessage() {} -func (*ImageStream) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*ImageStream) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } func (*ImageStreamImage) ProtoMessage() {} -func (*ImageStreamImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*ImageStreamImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } func (*ImageStreamImport) ProtoMessage() {} -func (*ImageStreamImport) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ImageStreamImport) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } func (*ImageStreamImportSpec) ProtoMessage() {} -func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } func (*ImageStreamImportStatus) ProtoMessage() {} func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptorGenerated, []int{14} } +func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } +func (*ImageStreamLayers) ProtoMessage() {} +func (*ImageStreamLayers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } func (*ImageStreamList) ProtoMessage() {} -func (*ImageStreamList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ImageStreamList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } func (*ImageStreamMapping) ProtoMessage() {} -func (*ImageStreamMapping) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ImageStreamMapping) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } func (*ImageStreamSpec) ProtoMessage() {} -func (*ImageStreamSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ImageStreamSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } func 
(*ImageStreamStatus) ProtoMessage() {} -func (*ImageStreamStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ImageStreamStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } func (*ImageStreamTag) ProtoMessage() {} -func (*ImageStreamTag) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ImageStreamTag) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } func (*ImageStreamTagList) ProtoMessage() {} -func (*ImageStreamTagList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ImageStreamTagList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } func (*NamedTagEventList) ProtoMessage() {} -func (*NamedTagEventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*NamedTagEventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } func (*RepositoryImportSpec) ProtoMessage() {} -func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } func (*RepositoryImportStatus) ProtoMessage() {} -func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } func (*SignatureCondition) ProtoMessage() {} -func (*SignatureCondition) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{22} } +func (*SignatureCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } func (*SignatureGenericEntity) ProtoMessage() {} -func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } func (*SignatureIssuer) ProtoMessage() {} -func (*SignatureIssuer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*SignatureIssuer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } func (*SignatureSubject) ProtoMessage() {} -func (*SignatureSubject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*SignatureSubject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *TagEvent) Reset() { *m = TagEvent{} } func (*TagEvent) ProtoMessage() {} -func (*TagEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*TagEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } func (*TagEventCondition) ProtoMessage() {} -func (*TagEventCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*TagEventCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } func (*TagImportPolicy) ProtoMessage() {} -func (*TagImportPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*TagImportPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m 
*TagReference) Reset() { *m = TagReference{} } func (*TagReference) ProtoMessage() {} -func (*TagReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*TagReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } func (*TagReferencePolicy) ProtoMessage() {} -func (*TagReferencePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*TagReferencePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func init() { proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference") proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image") + proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences") proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec") proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus") proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer") + proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData") proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList") proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy") proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature") @@ -208,6 +225,7 @@ func init() { proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport") proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec") proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus") + proto.RegisterType((*ImageStreamLayers)(nil), 
"github.com.openshift.api.image.v1.ImageStreamLayers") proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList") proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping") proto.RegisterType((*ImageStreamSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamSpec") @@ -351,6 +369,45 @@ func (m *Image) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Layers) > 0 { + for _, s := range m.Layers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Manifest != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Manifest))) + i += copy(dAtA[i:], *m.Manifest) + } + return i, nil +} + func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -480,6 +537,33 @@ func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ImageLayerData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LayerSize != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i += copy(dAtA[i:], m.MediaType) + return i, nil +} + func (m *ImageList) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) @@ -864,6 +948,94 @@ func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + if len(m.Blobs) > 0 { + keysForBlobs := make([]string, 0, len(m.Blobs)) + for k := range m.Blobs { + keysForBlobs = append(keysForBlobs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + for _, k := range keysForBlobs { + dAtA[i] = 0x12 + i++ + v := m.Blobs[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n26, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + } + if len(m.Images) > 0 { + keysForImages := make([]string, 0, len(m.Images)) + for k := range m.Images { + keysForImages = append(keysForImages, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + for _, k := range keysForImages { + dAtA[i] = 0x1a + i++ + v := m.Images[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n27, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + } + return i, nil +} + func (m *ImageStreamList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -882,11 +1054,11 @@ func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) + n28, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n28 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -920,19 +1092,19 @@ func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n26, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n29, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n29 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n27, err := m.Image.MarshalTo(dAtA[i:]) + n30, err := m.Image.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n30 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) @@ -974,11 +1146,11 @@ func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LookupPolicy.Size())) - n28, err := m.LookupPolicy.MarshalTo(dAtA[i:]) + n31, err := m.LookupPolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n31 return i, nil } @@ -1038,20 +1210,20 @@ func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n29, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 
0, err } - i += n29 + i += n32 if m.Tag != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Tag.Size())) - n30, err := m.Tag.MarshalTo(dAtA[i:]) + n33, err := m.Tag.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n33 } dAtA[i] = 0x18 i++ @@ -1071,19 +1243,19 @@ func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n31, err := m.Image.MarshalTo(dAtA[i:]) + n34, err := m.Image.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n34 dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LookupPolicy.Size())) - n32, err := m.LookupPolicy.MarshalTo(dAtA[i:]) + n35, err := m.LookupPolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n35 return i, nil } @@ -1105,11 +1277,11 @@ func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n33, err := m.ListMeta.MarshalTo(dAtA[i:]) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n36 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1189,19 +1361,19 @@ func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n34, err := m.From.MarshalTo(dAtA[i:]) + n37, err := m.From.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n37 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ImportPolicy.Size())) - n35, err := m.ImportPolicy.MarshalTo(dAtA[i:]) + n38, err := m.ImportPolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n38 dAtA[i] = 0x18 i++ if m.IncludeManifest { @@ -1213,11 +1385,11 @@ func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ReferencePolicy.Size())) - n36, err := 
m.ReferencePolicy.MarshalTo(dAtA[i:]) + n39, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n39 return i, nil } @@ -1239,11 +1411,11 @@ func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n37, err := m.Status.MarshalTo(dAtA[i:]) + n40, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n40 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x12 @@ -1300,19 +1472,19 @@ func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n38, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n41, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n41 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n42, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n42 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -1368,11 +1540,11 @@ func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SignatureGenericEntity.Size())) - n40, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) + n43, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n43 return i, nil } @@ -1394,11 +1566,11 @@ func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SignatureGenericEntity.Size())) - n41, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) + n44, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n44 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.PublicKeyID))) @@ -1424,11 +1596,11 @@ func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Created.Size())) - n42, err := m.Created.MarshalTo(dAtA[i:]) + n45, err := m.Created.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n45 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) @@ -1469,11 +1641,11 @@ func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n43, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n46, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n46 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -1567,11 +1739,11 @@ func (m *TagReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n44, err := m.From.MarshalTo(dAtA[i:]) + n47, err := m.From.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n47 } dAtA[i] = 0x20 i++ @@ -1589,19 +1761,19 @@ func (m *TagReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ImportPolicy.Size())) - n45, err := m.ImportPolicy.MarshalTo(dAtA[i:]) + n48, err := m.ImportPolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n48 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ReferencePolicy.Size())) - n46, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) + n49, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n49 return i, nil } @@ -1708,6 +1880,22 @@ func (m *Image) Size() (n int) { return n } +func (m *ImageBlobReferences) Size() (n int) { + var l int + _ = l + if len(m.Layers) > 0 { + for _, s := range m.Layers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + 
if m.Manifest != nil { + l = len(*m.Manifest) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ImageImportSpec) Size() (n int) { var l int _ = l @@ -1750,6 +1938,17 @@ func (m *ImageLayer) Size() (n int) { return n } +func (m *ImageLayerData) Size() (n int) { + var l int + _ = l + if m.LayerSize != nil { + n += 1 + sovGenerated(uint64(*m.LayerSize)) + } + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ImageList) Size() (n int) { var l int _ = l @@ -1884,6 +2083,32 @@ func (m *ImageStreamImportStatus) Size() (n int) { return n } +func (m *ImageStreamLayers) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Blobs) > 0 { + for k, v := range m.Blobs { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Images) > 0 { + for k, v := range m.Images { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *ImageStreamList) Size() (n int) { var l int _ = l @@ -2198,6 +2423,17 @@ func (this *Image) String() string { }, "") return s } +func (this *ImageBlobReferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageBlobReferences{`, + `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, + `Manifest:` + valueToStringGenerated(this.Manifest) + `,`, + `}`, + }, "") + return s +} func (this *ImageImportSpec) String() string { if this == nil { return "nil" @@ -2236,6 +2472,17 @@ func (this *ImageLayer) String() string { }, "") return s } +func (this *ImageLayerData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayerData{`, + `LayerSize:` + 
valueToStringGenerated(this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} func (this *ImageList) String() string { if this == nil { return "nil" @@ -2344,6 +2591,38 @@ func (this *ImageStreamImportStatus) String() string { }, "") return s } +func (this *ImageStreamLayers) String() string { + if this == nil { + return "nil" + } + keysForBlobs := make([]string, 0, len(this.Blobs)) + for k := range this.Blobs { + keysForBlobs = append(keysForBlobs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + mapStringForBlobs := "map[string]ImageLayerData{" + for _, k := range keysForBlobs { + mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) + } + mapStringForBlobs += "}" + keysForImages := make([]string, 0, len(this.Images)) + for k := range this.Images { + keysForImages = append(keysForImages, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + mapStringForImages := "map[string]ImageBlobReferences{" + for _, k := range keysForImages { + mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) + } + mapStringForImages += "}" + s := strings.Join([]string{`&ImageStreamLayers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Blobs:` + mapStringForBlobs + `,`, + `Images:` + mapStringForImages + `,`, + `}`, + }, "") + return s +} func (this *ImageStreamList) String() string { if this == nil { return "nil" @@ -3125,7 +3404,7 @@ func (m *Image) Unmarshal(dAtA []byte) error { } return nil } -func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { +func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3148,17 +3427,17 @@ func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ImageImportSpec: wiretype 
end group for non-group") + return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3168,27 +3447,26 @@ func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Manifest", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3198,32 +3476,142 @@ func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.To 
== nil { - m.To = &k8s_io_api_core_v1.LocalObjectReference{} - } - if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + s := string(dAtA[iNdEx:postIndex]) + m.Manifest = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + if skippy < 0 { + return ErrInvalidLengthGenerated } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &k8s_io_api_core_v1.LocalObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { @@ -3587,6 +3975,105 @@ func (m *ImageLayer) Unmarshal(dAtA []byte) error { } return nil } +func (m *ImageLayerData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LayerSize = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ImageList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4854,6 +5341,328 @@ func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + 
if m.Blobs == nil { + m.Blobs = make(map[string]ImageLayerData) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ImageLayerData{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Blobs[mapkey] = *mapvalue + } else { + var mapvalue ImageLayerData + m.Blobs[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Images == nil { + m.Images = make(map[string]ImageBlobReferences) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ImageBlobReferences{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Images[mapkey] = *mapvalue + } else { + var mapvalue ImageBlobReferences + m.Images[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ImageStreamList) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7660,148 +8469,158 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2284 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4d, 0x8c, 0x1b, 0x49, - 0x15, 0x4e, 0xbb, 0xed, 0xb1, 0xe7, 0x8d, 0x33, 0x93, 0xa9, 0x64, 0xb2, 0xde, 0x49, 0xd6, 0x13, - 0x3a, 0x64, 0x15, 0x20, 0xdb, 0x66, 0x66, 0xb3, 0x90, 0x04, 0x09, 0x36, 0x8e, 0xc3, 0xca, 0x30, - 0x43, 0x66, 0x6b, 0x4c, 0x0e, 0x51, 0x90, 0xa8, 0x69, 0xd7, 0xf4, 0x14, 0x63, 0x77, 0x5b, 0xdd, - 0xed, 0xd9, 0x9d, 0x08, 0x24, 0x0e, 0x68, 0xb5, 0x07, 0x0e, 0x70, 0xde, 0x23, 0x42, 0x88, 0x33, - 0x02, 0x71, 0x07, 0xa4, 0x88, 0x0b, 0xab, 0xe5, 0xb2, 0x17, 0x46, 0xc4, 0x70, 0xe6, 0xc6, 0x65, - 0x4f, 0xa8, 0xaa, 0xab, 0x7f, 0xdd, 0x9e, 0xb4, 0x43, 0xc6, 0x82, 0x9b, 0x5d, 0xef, 0xbd, 0xaf, - 0x5e, 0xbd, 0x57, 0xef, 0xa7, 0x5e, 0xc3, 0xba, 0xc9, 0xbc, 0xfd, 0xe1, 0xae, 0x6e, 0xd8, 0xfd, - 0x86, 0x3d, 0xa0, 0x96, 0xbb, 0xcf, 0xf6, 0xbc, 0x06, 0x19, 0xb0, 0x06, 0xeb, 0x13, 0x93, 0x36, - 0x0e, 0xd7, 0x1b, 0x26, 0xb5, 0xa8, 0x43, 0x3c, 0xda, 0xd5, 0x07, 0x8e, 0xed, 0xd9, 0xe8, 0x73, - 0x91, 0x88, 0x1e, 0x8a, 0xe8, 0x64, 0xc0, 0x74, 0x21, 0xa2, 0x1f, 0xae, 0xaf, 0xbe, 0x11, 0x43, - 0x35, 0x6d, 0xd3, 0x6e, 0x08, 0xc9, 0xdd, 0xe1, 0x9e, 0xf8, 0x27, 0xfe, 0x88, 0x5f, 0x3e, 0xe2, - 0xaa, 0x76, 0x70, 0xcb, 0xd5, 0x99, 0x2d, 0xb6, 0x35, 0x6c, 0x27, 0x6b, 0xd7, 0xd5, 0x9b, 0x11, - 0x4f, 0x9f, 0x18, 0xfb, 0xcc, 0xa2, 0xce, 0x51, 0x63, 0x70, 0x60, 0xf2, 0x05, 0xb7, 0xd1, 0xa7, - 0x1e, 0xc9, 0x92, 0x6a, 0x4c, 0x92, 0x72, 0x86, 0x96, 0xc7, 0xfa, 0x74, 0x4c, 0xe0, 0x2b, 0xcf, - 0x13, 0x70, 0x8d, 0x7d, 0xda, 0x27, 0x63, 0x72, 0x6f, 0x4e, 0x92, 0x1b, 0x7a, 0xac, 0xd7, 0x60, - 0x96, 0xe7, 0x7a, 0x4e, 0x5a, 0x48, 0xfb, 0x44, 0x81, 0x0b, 0x2d, 0xdb, 0x38, 0xa0, 0x4e, 0x9b, - 0x5b, 0x0e, 0xd3, 0x3d, 0xea, 0x50, 0xcb, 0xa0, 0xe8, 0x06, 0x54, 0x1c, 0x6a, 0x32, 0xd7, 0x73, - 0x8e, 0x6a, 0xca, 0x15, 0xe5, 0xfa, 0x7c, 0xf3, 
0xdc, 0xd3, 0xe3, 0xb5, 0x33, 0xa3, 0xe3, 0xb5, - 0x0a, 0x96, 0xeb, 0x38, 0xe4, 0x40, 0x0d, 0x98, 0xb7, 0x48, 0x9f, 0xba, 0x03, 0x62, 0xd0, 0x5a, - 0x41, 0xb0, 0x2f, 0x4b, 0xf6, 0xf9, 0xef, 0x04, 0x04, 0x1c, 0xf1, 0xa0, 0x2b, 0x50, 0xe4, 0x7f, - 0x6a, 0xaa, 0xe0, 0xad, 0x4a, 0xde, 0x22, 0xe7, 0xc5, 0x82, 0x82, 0x5e, 0x03, 0xd5, 0x23, 0x66, - 0xad, 0x28, 0x18, 0x16, 0x24, 0x83, 0xda, 0x21, 0x26, 0xe6, 0xeb, 0x68, 0x15, 0x0a, 0xac, 0x55, - 0x2b, 0x09, 0x2a, 0x48, 0x6a, 0xa1, 0xdd, 0xc2, 0x05, 0xd6, 0xd2, 0xfe, 0x52, 0x86, 0x92, 0x38, - 0x0e, 0xfa, 0x3e, 0x54, 0xb8, 0x5f, 0xba, 0xc4, 0x23, 0xe2, 0x14, 0x0b, 0x1b, 0x5f, 0xd6, 0x7d, - 0x33, 0xe9, 0x71, 0x33, 0xe9, 0x83, 0x03, 0x93, 0x2f, 0xb8, 0x3a, 0xe7, 0xd6, 0x0f, 0xd7, 0xf5, - 0x07, 0xbb, 0x3f, 0xa0, 0x86, 0xb7, 0x45, 0x3d, 0xd2, 0x44, 0x12, 0x1d, 0xa2, 0x35, 0x1c, 0xa2, - 0xa2, 0x6d, 0xb8, 0xd0, 0xcd, 0xb0, 0x9f, 0x34, 0xc2, 0x65, 0x29, 0x9b, 0x69, 0x63, 0x9c, 0x29, - 0x89, 0x7e, 0x08, 0xe7, 0x63, 0xeb, 0x5b, 0x81, 0xfa, 0xaa, 0x50, 0xff, 0x8d, 0x89, 0xea, 0xcb, - 0xdb, 0xa1, 0x63, 0xf2, 0xde, 0xfd, 0xf7, 0x3d, 0x6a, 0xb9, 0xcc, 0xb6, 0x9a, 0x97, 0xe4, 0xfe, - 0xe7, 0x5b, 0xe3, 0x88, 0x38, 0x6b, 0x1b, 0xb4, 0x0b, 0xab, 0x19, 0xcb, 0x0f, 0xa9, 0xc3, 0xf1, - 0xa4, 0x37, 0x34, 0x89, 0xba, 0xda, 0x9a, 0xc8, 0x89, 0x4f, 0x40, 0x41, 0x5b, 0xc9, 0x13, 0x12, - 0x8b, 0xed, 0x51, 0xd7, 0x93, 0xce, 0xcc, 0x54, 0x59, 0xb2, 0xe0, 0x2c, 0x39, 0x74, 0x08, 0xcb, - 0xb1, 0xe5, 0x4d, 0x72, 0x44, 0x1d, 0xb7, 0x36, 0x77, 0x45, 0x15, 0xe6, 0x7a, 0x6e, 0xa6, 0xd0, - 0x23, 0xa9, 0xe6, 0xab, 0x72, 0xef, 0xe5, 0x56, 0x1a, 0x0f, 0x8f, 0x6f, 0x81, 0x28, 0x80, 0xcb, - 0x4c, 0x8b, 0x78, 0x43, 0x87, 0xba, 0xb5, 0xb2, 0xd8, 0x70, 0x3d, 0xef, 0x86, 0x3b, 0x81, 0x64, - 0x74, 0xbf, 0xc2, 0x25, 0x17, 0xc7, 0x80, 0xd1, 0x03, 0x58, 0x89, 0xed, 0x1d, 0x31, 0xd5, 0x2a, - 0x57, 0xd4, 0xeb, 0xd5, 0xe6, 0xab, 0xa3, 0xe3, 0xb5, 0x95, 0x56, 0x16, 0x03, 0xce, 0x96, 0x43, - 0xfb, 0x70, 0x39, 0xc3, 0x8c, 0x5b, 0xb4, 0xcb, 0x48, 0xe7, 0x68, 0x40, 0x6b, 0xf3, 
0xc2, 0x0f, - 0x9f, 0x97, 0x6a, 0x5d, 0x6e, 0x9d, 0xc0, 0x8b, 0x4f, 0x44, 0x42, 0xef, 0x24, 0x3c, 0x73, 0xcf, - 0xb6, 0xf6, 0x98, 0x59, 0x03, 0x01, 0x9f, 0x65, 0x6a, 0x9f, 0x01, 0x8f, 0xcb, 0x68, 0x7f, 0x52, - 0x61, 0x49, 0xfc, 0x6f, 0xf7, 0x07, 0xb6, 0xe3, 0xed, 0x0c, 0xa8, 0x81, 0xee, 0x43, 0x71, 0xcf, - 0xb1, 0xfb, 0x32, 0xae, 0xaf, 0xc6, 0x02, 0x43, 0xe7, 0x19, 0x3c, 0x8a, 0xe2, 0x30, 0xb4, 0xa2, - 0x3c, 0xf3, 0x4d, 0xc7, 0xee, 0x63, 0x21, 0x8e, 0xde, 0x86, 0x82, 0x67, 0x8b, 0x70, 0x5d, 0xd8, - 0xb8, 0x9e, 0x05, 0xb2, 0x69, 0x1b, 0xa4, 0x97, 0x46, 0x9a, 0xe3, 0xe9, 0xa6, 0x63, 0xe3, 0x82, - 0x67, 0xa3, 0x1e, 0x54, 0x99, 0x50, 0x6b, 0xdb, 0xee, 0x31, 0xe3, 0x48, 0x46, 0xea, 0x46, 0x8e, - 0x9b, 0xd0, 0x21, 0x66, 0x3b, 0x26, 0xd9, 0xbc, 0x20, 0xf5, 0xab, 0xc6, 0x57, 0x71, 0x02, 0x1d, - 0xdd, 0x85, 0x25, 0x66, 0x19, 0xbd, 0x61, 0x37, 0x0a, 0x1c, 0x1e, 0x95, 0x95, 0xe6, 0x2b, 0x52, - 0x78, 0xa9, 0x9d, 0x24, 0xe3, 0x34, 0x3f, 0x7a, 0x1f, 0x96, 0x9c, 0xe0, 0x24, 0x52, 0xe7, 0x92, - 0xd0, 0xf9, 0xad, 0x7c, 0x3a, 0xe3, 0xa4, 0x70, 0xb4, 0x73, 0x8a, 0x80, 0xd3, 0xdb, 0x68, 0x7f, - 0x55, 0x60, 0x39, 0xee, 0x47, 0x8f, 0x78, 0x43, 0x17, 0x75, 0x60, 0xce, 0x15, 0xbf, 0xa4, 0x2f, - 0x6f, 0xe4, 0xcb, 0xd1, 0xbe, 0x74, 0x73, 0x51, 0xee, 0x3e, 0xe7, 0xff, 0xc7, 0x12, 0x0b, 0xb5, - 0xa1, 0x24, 0x94, 0x0e, 0x7d, 0x9b, 0x33, 0x32, 0x9b, 0xf3, 0xa3, 0xe3, 0x35, 0xbf, 0x7e, 0x60, - 0x1f, 0x21, 0xa8, 0x45, 0x6a, 0x76, 0x2d, 0xd2, 0x3e, 0x50, 0x00, 0xa2, 0xc4, 0x10, 0xd6, 0x36, - 0x65, 0x62, 0x6d, 0xbb, 0x06, 0x45, 0x97, 0x3d, 0xf1, 0x35, 0x53, 0xa3, 0x4a, 0x29, 0xc4, 0x77, - 0xd8, 0x13, 0x8a, 0x05, 0x99, 0x57, 0xd5, 0x7e, 0x18, 0x95, 0x6a, 0xb2, 0xaa, 0x46, 0x21, 0x18, - 0xf1, 0x68, 0xbf, 0x57, 0x60, 0xde, 0x57, 0x84, 0xb9, 0x1e, 0x7a, 0x3c, 0x56, 0xfc, 0xf4, 0x7c, - 0x86, 0xe5, 0xd2, 0xa2, 0xf4, 0x85, 0x25, 0x3f, 0x58, 0x89, 0x15, 0xbe, 0x2d, 0x28, 0x31, 0x8f, - 0xf6, 0xdd, 0x5a, 0x41, 0x24, 0xbe, 0xfc, 0xe6, 0x3d, 0x2b, 0x41, 0x4b, 0x6d, 0x2e, 0x8e, 0x7d, - 0x14, 0xed, 0x96, 0xbc, 
0x18, 0x9b, 0xb6, 0x7d, 0x30, 0x1c, 0xc8, 0xbb, 0x7e, 0x15, 0x4a, 0x3d, - 0x1e, 0x7d, 0xe2, 0xf0, 0x95, 0x48, 0x52, 0x84, 0x24, 0xf6, 0x69, 0xda, 0x6f, 0xe6, 0x60, 0x31, - 0x99, 0xe2, 0x66, 0x50, 0xf6, 0xaf, 0x40, 0xd1, 0xe3, 0x5e, 0x29, 0x24, 0x7d, 0x2c, 0x1c, 0x22, - 0x28, 0xe8, 0x1a, 0x94, 0x0d, 0xdb, 0xf2, 0xa8, 0xe5, 0x09, 0xed, 0xab, 0xcd, 0x85, 0xd1, 0xf1, - 0x5a, 0xf9, 0x9e, 0xbf, 0x84, 0x03, 0x1a, 0x62, 0x00, 0x86, 0x6d, 0x75, 0x99, 0xc7, 0x6c, 0xcb, - 0xad, 0x15, 0x85, 0x2d, 0xf3, 0x84, 0x61, 0x78, 0xd8, 0x7b, 0x81, 0x74, 0xa4, 0x71, 0xb8, 0xe4, - 0xe2, 0x18, 0x38, 0xfa, 0x1a, 0x9c, 0x15, 0xe2, 0xed, 0x2e, 0xb5, 0x3c, 0xe6, 0x1d, 0xc9, 0x82, - 0xbb, 0x22, 0xc5, 0xce, 0xb6, 0xe3, 0x44, 0x9c, 0xe4, 0x45, 0x3f, 0x82, 0x2a, 0xaf, 0x49, 0xb4, - 0x7b, 0xaf, 0x47, 0x58, 0x3f, 0xa8, 0xaf, 0xf7, 0xa6, 0x2e, 0x77, 0x42, 0xf1, 0x00, 0xe5, 0xbe, - 0xe5, 0x39, 0xb1, 0xac, 0x17, 0x27, 0xe1, 0xc4, 0x76, 0xe8, 0x5d, 0x28, 0x1b, 0x0e, 0xe5, 0x8d, - 0x6b, 0xad, 0x2c, 0x1c, 0xfa, 0xc5, 0x7c, 0x0e, 0xed, 0xb0, 0x3e, 0x95, 0x96, 0xf7, 0xc5, 0x71, - 0x80, 0xc3, 0xc3, 0x83, 0xb9, 0xee, 0x90, 0x76, 0x9b, 0x47, 0xb5, 0x4a, 0xee, 0x94, 0x1d, 0x1e, - 0xa4, 0xcd, 0x65, 0x9d, 0x66, 0x95, 0x87, 0x47, 0x5b, 0xe2, 0xe0, 0x10, 0x11, 0x7d, 0x2f, 0x40, - 0xef, 0xd8, 0xa2, 0xa0, 0x2e, 0x6c, 0xbc, 0x39, 0x0d, 0xfa, 0xce, 0x50, 0xdc, 0xba, 0x38, 0x7c, - 0xc7, 0xc6, 0x21, 0xe4, 0xea, 0x37, 0x60, 0x79, 0xcc, 0x90, 0xe8, 0x1c, 0xa8, 0x07, 0x54, 0xb6, - 0xeb, 0x98, 0xff, 0x44, 0x17, 0xa0, 0x74, 0x48, 0x7a, 0x43, 0x79, 0x4f, 0xb1, 0xff, 0xe7, 0x4e, - 0xe1, 0x96, 0xa2, 0xfd, 0xa2, 0x00, 0x0b, 0xbe, 0x67, 0x3c, 0x87, 0x92, 0xfe, 0x0c, 0x42, 0xa6, - 0x03, 0x45, 0x77, 0x40, 0x0d, 0x99, 0x8e, 0x37, 0x72, 0xdf, 0x1c, 0xa1, 0x1f, 0xaf, 0xf8, 0x51, - 0x98, 0xf1, 0x7f, 0x58, 0xa0, 0xa1, 0xc7, 0x61, 0xed, 0xf0, 0xcb, 0xee, 0xcd, 0x29, 0x71, 0x4f, - 0xac, 0x21, 0xda, 0x1f, 0x14, 0x38, 0x17, 0xe3, 0x9e, 0xd5, 0xa3, 0x62, 0xeb, 0x45, 0x4b, 0x57, - 0x94, 0x5b, 0x63, 0xe5, 0x4b, 0xfb, 0x6d, 0x41, 0x26, 0xd7, 
0xe0, 0x14, 0xbc, 0xf6, 0xce, 0xe0, - 0x18, 0x8f, 0x12, 0x1e, 0xbf, 0x35, 0x9d, 0x67, 0xa2, 0x4e, 0x2f, 0xd3, 0xef, 0xbb, 0x29, 0xbf, - 0xdf, 0x79, 0x21, 0xf4, 0x93, 0xbd, 0xff, 0x93, 0x02, 0xac, 0x64, 0x6a, 0x84, 0x5e, 0x87, 0x39, - 0xbf, 0x29, 0x13, 0x96, 0xab, 0x44, 0x08, 0x3e, 0x0f, 0x96, 0x54, 0x64, 0x02, 0x38, 0x74, 0x60, - 0xbb, 0xcc, 0xb3, 0x9d, 0x23, 0x69, 0x87, 0xaf, 0xe6, 0xd0, 0x14, 0x87, 0x42, 0x31, 0x33, 0x2c, - 0x72, 0x43, 0x47, 0x14, 0x1c, 0x83, 0x46, 0x8f, 0xb8, 0x42, 0xc4, 0xa4, 0xdc, 0x1c, 0xea, 0x34, - 0xe1, 0x15, 0xc7, 0x8f, 0x0e, 0xc1, 0x91, 0xb0, 0x44, 0xd4, 0x7e, 0x57, 0x80, 0x57, 0x26, 0x98, - 0x0e, 0xe1, 0x84, 0x21, 0x78, 0x87, 0x31, 0x95, 0x1b, 0x9a, 0x90, 0x61, 0x34, 0x96, 0x61, 0xb4, - 0xdb, 0x2f, 0x62, 0x34, 0xe9, 0xdd, 0x13, 0xcc, 0xf6, 0x38, 0x65, 0xb6, 0x9b, 0x53, 0x9a, 0x2d, - 0x75, 0x7f, 0x52, 0x86, 0xfb, 0xa3, 0x22, 0x5f, 0x2d, 0xfe, 0x61, 0x67, 0xd0, 0x94, 0xed, 0x24, - 0x9b, 0xb2, 0x69, 0xbd, 0x91, 0xdd, 0x9a, 0xfd, 0x53, 0x01, 0x14, 0xe3, 0xda, 0x22, 0x83, 0x01, - 0xb3, 0xcc, 0xff, 0xbb, 0x34, 0xf8, 0xbc, 0x2e, 0xfe, 0xd7, 0x85, 0x84, 0xb7, 0x44, 0x9c, 0xef, - 0x24, 0xde, 0xde, 0xd1, 0x25, 0x92, 0xbd, 0xfd, 0x6b, 0x12, 0x64, 0xa5, 0x95, 0xc5, 0x84, 0xb3, - 0x65, 0xd1, 0xbb, 0x50, 0xf4, 0x88, 0x19, 0xf8, 0xa8, 0x31, 0xe5, 0x9b, 0x2b, 0xd6, 0x6c, 0x12, - 0xd3, 0xc5, 0x02, 0x0a, 0x59, 0x50, 0xed, 0xc5, 0x1a, 0xe7, 0x69, 0x6b, 0x61, 0xbc, 0xe9, 0x8e, - 0xda, 0xb1, 0xf8, 0x2a, 0x4e, 0xe0, 0x6b, 0xbf, 0x4a, 0x56, 0x14, 0x99, 0x0c, 0x4e, 0xc5, 0x5a, - 0x0f, 0x13, 0xd6, 0xca, 0x73, 0x24, 0xfe, 0xd0, 0xea, 0x76, 0x88, 0x79, 0xff, 0x90, 0x5a, 0x1e, - 0x0f, 0x92, 0x4c, 0x93, 0x51, 0xb8, 0x34, 0x18, 0xee, 0xf6, 0x98, 0x91, 0xa9, 0x8d, 0xbc, 0x25, - 0x57, 0xa5, 0xe0, 0xa5, 0xed, 0xc9, 0xac, 0xf8, 0x24, 0x1c, 0xed, 0xa3, 0x62, 0xf0, 0x3a, 0x11, - 0x96, 0xea, 0x90, 0x59, 0x04, 0xce, 0xb7, 0xfc, 0x9b, 0xee, 0x87, 0xcd, 0xd4, 0x17, 0xac, 0x9c, - 0x18, 0xb4, 0x6e, 0x00, 0xc8, 0xa1, 0x31, 0xb3, 0x2d, 0x61, 0x16, 0x35, 0xda, 0xfd, 0x9d, 0x90, - 
0x82, 0x63, 0x5c, 0x68, 0x3f, 0xe3, 0x51, 0x73, 0x33, 0x9f, 0x1a, 0xc2, 0x69, 0xf9, 0xdf, 0x34, - 0x61, 0x8a, 0x28, 0xbd, 0x94, 0x14, 0x91, 0x8e, 0xa3, 0xb9, 0x53, 0x8e, 0xa3, 0x3f, 0x27, 0x53, - 0x6b, 0x87, 0x98, 0x33, 0x28, 0x12, 0x0f, 0x93, 0x45, 0x62, 0x7d, 0xba, 0x22, 0xd1, 0x21, 0xe6, - 0x84, 0x3a, 0xf1, 0x2f, 0x05, 0x96, 0xc7, 0x62, 0x2f, 0xc8, 0xba, 0xca, 0x84, 0x39, 0xfe, 0x76, - 0x52, 0x99, 0x2f, 0x4d, 0x71, 0x4b, 0xb2, 0xd5, 0x48, 0x5d, 0x3e, 0xf5, 0xf4, 0x2e, 0x9f, 0xf6, - 0xa1, 0x0a, 0x17, 0xb2, 0x3a, 0xb5, 0x97, 0x35, 0x9a, 0x4c, 0x0f, 0x16, 0x0b, 0xb3, 0x1e, 0x2c, - 0xaa, 0xff, 0xfd, 0x60, 0xb1, 0x38, 0x9b, 0xc1, 0xe2, 0x87, 0x05, 0xb8, 0x98, 0xdd, 0xff, 0x9d, - 0xd2, 0x74, 0x31, 0xea, 0x1c, 0x0b, 0x2f, 0xbf, 0x73, 0x44, 0x77, 0x60, 0x91, 0x74, 0xfd, 0x6b, - 0x46, 0x7a, 0xbc, 0x68, 0x89, 0x7b, 0x3c, 0xdf, 0x44, 0xa3, 0xe3, 0xb5, 0xc5, 0xbb, 0x09, 0x0a, - 0x4e, 0x71, 0x6a, 0x9f, 0xa8, 0x80, 0xc6, 0xa7, 0x43, 0xe8, 0x8e, 0x9c, 0x58, 0xf9, 0x81, 0xf8, - 0x7a, 0x7c, 0x62, 0xf5, 0xd9, 0xf1, 0xda, 0xc5, 0x71, 0x89, 0xd8, 0x2c, 0x6b, 0x33, 0x34, 0xa1, - 0x3f, 0xef, 0xba, 0x99, 0x34, 0xca, 0x67, 0xc7, 0x6b, 0x19, 0xdf, 0x4f, 0xf5, 0x10, 0x29, 0x65, - 0x3a, 0x13, 0xce, 0xf6, 0x88, 0xeb, 0x6d, 0x3b, 0xf6, 0x2e, 0xed, 0x30, 0xf9, 0x11, 0x70, 0xba, - 0x89, 0x4e, 0x38, 0xb3, 0xda, 0x8c, 0x03, 0xe1, 0x24, 0x2e, 0x3a, 0x04, 0xc4, 0x17, 0x3a, 0x0e, - 0xb1, 0x5c, 0xff, 0x48, 0x7c, 0xb7, 0xe2, 0xd4, 0xbb, 0xad, 0xca, 0xdd, 0xd0, 0xe6, 0x18, 0x1a, - 0xce, 0xd8, 0x81, 0xbf, 0x0e, 0x1d, 0x4a, 0x5c, 0xdb, 0x92, 0x13, 0xb6, 0xd0, 0xcb, 0x58, 0xac, - 0x62, 0x49, 0x45, 0x5f, 0x80, 0x72, 0x9f, 0xba, 0x2e, 0x2f, 0x5f, 0x73, 0x82, 0x71, 0x49, 0x32, - 0x96, 0xb7, 0xfc, 0x65, 0x1c, 0xd0, 0xb5, 0x0f, 0x14, 0x88, 0x5c, 0x24, 0xaa, 0x2e, 0x33, 0xee, - 0xfb, 0x93, 0xb9, 0x5b, 0x50, 0xb5, 0x1d, 0x93, 0x58, 0xec, 0x89, 0x5f, 0xa2, 0x7d, 0x07, 0x87, - 0x11, 0xff, 0x20, 0x46, 0xc3, 0x09, 0x4e, 0x5e, 0xda, 0x0d, 0xbb, 0xdf, 0xb7, 0x2d, 0x9e, 0xb5, - 0xa5, 0x6b, 0x63, 0x39, 0x2f, 0xa0, 
0xe0, 0x18, 0x97, 0xf6, 0x4b, 0x05, 0x96, 0x52, 0x33, 0x30, - 0xf4, 0x73, 0x05, 0x2e, 0xba, 0x99, 0xca, 0xc9, 0x90, 0xbb, 0x3d, 0xcd, 0xe8, 0x2b, 0x01, 0xd0, - 0xac, 0x4b, 0x7d, 0x26, 0x9c, 0x1e, 0x4f, 0xd8, 0x58, 0xfb, 0x9b, 0x02, 0xe7, 0xd2, 0xd3, 0xb4, - 0xff, 0x45, 0x45, 0xd1, 0x5b, 0xb0, 0xe0, 0xf7, 0x8f, 0xdf, 0xa6, 0x47, 0xed, 0x96, 0xf4, 0xc2, - 0x79, 0x09, 0xb6, 0xb0, 0x1d, 0x91, 0x70, 0x9c, 0x4f, 0xfb, 0x69, 0x01, 0x2a, 0x41, 0xc5, 0x42, - 0xdf, 0x8d, 0xa6, 0xa3, 0xca, 0xd4, 0xb7, 0x3b, 0xbc, 0x74, 0x63, 0x13, 0xd2, 0x97, 0xff, 0x6d, - 0xfb, 0x6a, 0xd0, 0xae, 0xf9, 0xed, 0x75, 0x76, 0x13, 0x96, 0xec, 0x38, 0x8b, 0x79, 0x3a, 0x4e, - 0xed, 0x23, 0x15, 0x96, 0xc7, 0x0a, 0x38, 0xba, 0x9d, 0xc8, 0x79, 0xd7, 0x52, 0x39, 0x6f, 0x65, - 0x4c, 0xe0, 0xd4, 0x52, 0x5e, 0x76, 0x26, 0x52, 0x67, 0x98, 0x89, 0x8a, 0x79, 0x33, 0x51, 0xe9, - 0xe4, 0x4c, 0x94, 0xf2, 0xce, 0x5c, 0x2e, 0xef, 0x0c, 0x60, 0x29, 0xd5, 0x91, 0xa0, 0x1b, 0x50, - 0x61, 0x96, 0x4b, 0x8d, 0xa1, 0x43, 0xe5, 0x0c, 0x2d, 0x6c, 0x59, 0xdb, 0x72, 0x1d, 0x87, 0x1c, - 0xa8, 0x01, 0xf3, 0xae, 0xb1, 0x4f, 0xbb, 0xc3, 0x1e, 0xed, 0x0a, 0x87, 0x54, 0xa2, 0x2f, 0x61, - 0x3b, 0x01, 0x01, 0x47, 0x3c, 0xda, 0xbf, 0x8b, 0x50, 0x8d, 0x37, 0x14, 0x39, 0x3e, 0xca, 0xbd, - 0x07, 0x0b, 0xc4, 0xb2, 0x6c, 0x8f, 0xf8, 0x8d, 0xa3, 0x5f, 0xd6, 0xdf, 0x9e, 0xb2, 0x71, 0xd1, - 0xef, 0x46, 0x10, 0xfe, 0xd7, 0x8d, 0x30, 0x94, 0x63, 0x14, 0x1c, 0xdf, 0x09, 0xdd, 0x95, 0xdd, - 0xa2, 0x9a, 0xbf, 0x5b, 0xac, 0xa4, 0x3a, 0xc5, 0x06, 0xcc, 0x87, 0x1d, 0x91, 0xfc, 0x1c, 0x1c, - 0xda, 0x27, 0x8a, 0xc9, 0x88, 0x07, 0xe9, 0x09, 0x2f, 0x96, 0x84, 0x17, 0x17, 0x4f, 0x78, 0xd1, - 0xa5, 0x5b, 0xd1, 0xb9, 0x53, 0x6d, 0x45, 0x33, 0xfa, 0xc8, 0xf2, 0x4c, 0xfa, 0xc8, 0xd5, 0xaf, - 0xc3, 0xb9, 0xb4, 0x07, 0xa7, 0xfa, 0xac, 0xb2, 0x0d, 0x68, 0x7c, 0xff, 0xe7, 0xf5, 0x5e, 0xe3, - 0x12, 0x51, 0x22, 0x6a, 0x5e, 0x7f, 0xfa, 0xac, 0x7e, 0xe6, 0xe3, 0x67, 0xf5, 0x33, 0x9f, 0x3e, - 0xab, 0x9f, 0xf9, 0xf1, 0xa8, 0xae, 0x3c, 0x1d, 0xd5, 0x95, 0x8f, 0x47, 
0x75, 0xe5, 0xd3, 0x51, - 0x5d, 0xf9, 0xfb, 0xa8, 0xae, 0xfc, 0xec, 0x1f, 0xf5, 0x33, 0x8f, 0x0a, 0x87, 0xeb, 0xff, 0x09, - 0x00, 0x00, 0xff, 0xff, 0x9e, 0x09, 0x46, 0xf1, 0x49, 0x27, 0x00, 0x00, + // 2445 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4f, 0x6c, 0xdb, 0xc8, + 0xd5, 0x0f, 0x45, 0x49, 0x96, 0x9f, 0x1d, 0x3b, 0x9e, 0xc4, 0x59, 0xad, 0x92, 0xb5, 0xbd, 0xcc, + 0x97, 0x20, 0x5f, 0x9b, 0xa5, 0x6a, 0x6f, 0x76, 0x9b, 0xa4, 0x40, 0x77, 0xa3, 0x28, 0x0d, 0xd4, + 0xda, 0x8d, 0x77, 0xac, 0xe6, 0x10, 0xa4, 0x40, 0x69, 0x6a, 0x4c, 0x4f, 0x2d, 0x91, 0x2a, 0x49, + 0x79, 0xd7, 0x41, 0x0b, 0xf4, 0x50, 0x2c, 0xf6, 0xd0, 0x43, 0x7b, 0xde, 0x63, 0xb1, 0x28, 0x7a, + 0x2e, 0x5a, 0xf4, 0xde, 0x16, 0x08, 0x7a, 0xe9, 0x62, 0x7b, 0xd9, 0x4b, 0x8d, 0x46, 0xed, 0xb9, + 0xb7, 0x5e, 0xf6, 0x54, 0xcc, 0x70, 0x48, 0x0e, 0x29, 0xca, 0xa6, 0xd2, 0xd8, 0x68, 0x6f, 0xe2, + 0xcc, 0x7b, 0xbf, 0xf7, 0xf8, 0xde, 0xbc, 0x3f, 0xf3, 0x28, 0x58, 0xb5, 0xa8, 0xbf, 0x3b, 0xd8, + 0xd6, 0x4d, 0xa7, 0x57, 0x77, 0xfa, 0xc4, 0xf6, 0x76, 0xe9, 0x8e, 0x5f, 0x37, 0xfa, 0xb4, 0x4e, + 0x7b, 0x86, 0x45, 0xea, 0xfb, 0xab, 0x75, 0x8b, 0xd8, 0xc4, 0x35, 0x7c, 0xd2, 0xd1, 0xfb, 0xae, + 0xe3, 0x3b, 0xe8, 0xf5, 0x98, 0x45, 0x8f, 0x58, 0x74, 0xa3, 0x4f, 0x75, 0xce, 0xa2, 0xef, 0xaf, + 0xd6, 0xde, 0x90, 0x50, 0x2d, 0xc7, 0x72, 0xea, 0x9c, 0x73, 0x7b, 0xb0, 0xc3, 0x9f, 0xf8, 0x03, + 0xff, 0x15, 0x20, 0xd6, 0xb4, 0xbd, 0x5b, 0x9e, 0x4e, 0x1d, 0x2e, 0xd6, 0x74, 0xdc, 0x2c, 0xa9, + 0xb5, 0x9b, 0x31, 0x4d, 0xcf, 0x30, 0x77, 0xa9, 0x4d, 0xdc, 0x83, 0x7a, 0x7f, 0xcf, 0x62, 0x0b, + 0x5e, 0xbd, 0x47, 0x7c, 0x23, 0x8b, 0xab, 0x3e, 0x8e, 0xcb, 0x1d, 0xd8, 0x3e, 0xed, 0x91, 0x11, + 0x86, 0xb7, 0x8f, 0x63, 0xf0, 0xcc, 0x5d, 0xd2, 0x33, 0x46, 0xf8, 0xde, 0x1c, 0xc7, 0x37, 0xf0, + 0x69, 0xb7, 0x4e, 0x6d, 0xdf, 0xf3, 0xdd, 0x34, 0x93, 0xf6, 0x99, 0x02, 0x17, 0x9a, 0x8e, 0xb9, + 0x47, 0xdc, 0x16, 0xb3, 0x1c, 0x26, 0x3b, 0xc4, 0x25, 0xb6, 0x49, 0xd0, 0x0d, 0xa8, 
0xb8, 0xc4, + 0xa2, 0x9e, 0xef, 0x1e, 0x54, 0x95, 0x15, 0xe5, 0xfa, 0x74, 0xe3, 0xdc, 0xb3, 0xc3, 0xe5, 0x33, + 0xc3, 0xc3, 0xe5, 0x0a, 0x16, 0xeb, 0x38, 0xa2, 0x40, 0x75, 0x98, 0xb6, 0x8d, 0x1e, 0xf1, 0xfa, + 0x86, 0x49, 0xaa, 0x05, 0x4e, 0xbe, 0x20, 0xc8, 0xa7, 0xbf, 0x1d, 0x6e, 0xe0, 0x98, 0x06, 0xad, + 0x40, 0x91, 0x3d, 0x54, 0x55, 0x4e, 0x3b, 0x2b, 0x68, 0x8b, 0x8c, 0x16, 0xf3, 0x1d, 0xf4, 0x1a, + 0xa8, 0xbe, 0x61, 0x55, 0x8b, 0x9c, 0x60, 0x46, 0x10, 0xa8, 0x6d, 0xc3, 0xc2, 0x6c, 0x1d, 0xd5, + 0xa0, 0x40, 0x9b, 0xd5, 0x12, 0xdf, 0x05, 0xb1, 0x5b, 0x68, 0x35, 0x71, 0x81, 0x36, 0xb5, 0x3f, + 0x4f, 0x41, 0x89, 0xbf, 0x0e, 0xfa, 0x1e, 0x54, 0x98, 0x5f, 0x3a, 0x86, 0x6f, 0xf0, 0xb7, 0x98, + 0x59, 0xfb, 0x8a, 0x1e, 0x98, 0x49, 0x97, 0xcd, 0xa4, 0xf7, 0xf7, 0x2c, 0xb6, 0xe0, 0xe9, 0x8c, + 0x5a, 0xdf, 0x5f, 0xd5, 0x1f, 0x6e, 0x7f, 0x9f, 0x98, 0xfe, 0x06, 0xf1, 0x8d, 0x06, 0x12, 0xe8, + 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x6d, 0xc2, 0x85, 0x4e, 0x86, 0xfd, 0x84, 0x11, 0x2e, 0x0b, 0xde, + 0x4c, 0x1b, 0xe3, 0x4c, 0x4e, 0xf4, 0x43, 0x38, 0x2f, 0xad, 0x6f, 0x84, 0xea, 0xab, 0x5c, 0xfd, + 0x37, 0xc6, 0xaa, 0x2f, 0x4e, 0x87, 0x8e, 0x8d, 0xf7, 0xef, 0x7f, 0xe0, 0x13, 0xdb, 0xa3, 0x8e, + 0xdd, 0xb8, 0x24, 0xe4, 0x9f, 0x6f, 0x8e, 0x22, 0xe2, 0x2c, 0x31, 0x68, 0x1b, 0x6a, 0x19, 0xcb, + 0x8f, 0x88, 0xcb, 0xf0, 0x84, 0x37, 0x34, 0x81, 0x5a, 0x6b, 0x8e, 0xa5, 0xc4, 0x47, 0xa0, 0xa0, + 0x8d, 0xe4, 0x1b, 0x1a, 0x36, 0xdd, 0x21, 0x9e, 0x2f, 0x9c, 0x99, 0xa9, 0xb2, 0x20, 0xc1, 0x59, + 0x7c, 0x68, 0x1f, 0x16, 0xa4, 0xe5, 0x75, 0xe3, 0x80, 0xb8, 0x5e, 0xb5, 0xbc, 0xa2, 0x72, 0x73, + 0x1d, 0x9b, 0x29, 0xf4, 0x98, 0xab, 0xf1, 0xaa, 0x90, 0xbd, 0xd0, 0x4c, 0xe3, 0xe1, 0x51, 0x11, + 0x88, 0x00, 0x78, 0xd4, 0xb2, 0x0d, 0x7f, 0xe0, 0x12, 0xaf, 0x3a, 0xc5, 0x05, 0xae, 0xe6, 0x15, + 0xb8, 0x15, 0x72, 0xc6, 0xe7, 0x2b, 0x5a, 0xf2, 0xb0, 0x04, 0x8c, 0x1e, 0xc2, 0xa2, 0x24, 0x3b, + 0x26, 0xaa, 0x56, 0x56, 0xd4, 0xeb, 0xb3, 0x8d, 0x57, 0x87, 0x87, 0xcb, 0x8b, 0xcd, 0x2c, 0x02, + 0x9c, 0xcd, 0x87, 0x76, 
0xe1, 0x72, 0x86, 0x19, 0x37, 0x48, 0x87, 0x1a, 0xed, 0x83, 0x3e, 0xa9, + 0x4e, 0x73, 0x3f, 0xfc, 0x9f, 0x50, 0xeb, 0x72, 0xf3, 0x08, 0x5a, 0x7c, 0x24, 0x12, 0x7a, 0x90, + 0xf0, 0xcc, 0x3d, 0xc7, 0xde, 0xa1, 0x56, 0x15, 0x38, 0x7c, 0x96, 0xa9, 0x03, 0x02, 0x3c, 0xca, + 0xa3, 0x99, 0x70, 0x9e, 0x3f, 0x36, 0xba, 0xce, 0x76, 0x14, 0x29, 0x1e, 0xd2, 0xa0, 0xdc, 0x0d, + 0xdc, 0xad, 0xac, 0xa8, 0x2c, 0x11, 0x0c, 0x0f, 0x97, 0xcb, 0xc2, 0x61, 0x62, 0x07, 0x5d, 0x87, + 0x4a, 0x2f, 0x3c, 0x61, 0x41, 0x50, 0xce, 0xb2, 0x24, 0x16, 0x1d, 0xa9, 0x68, 0x57, 0xfb, 0xa3, + 0x0a, 0xf3, 0x5c, 0x4a, 0xab, 0xd7, 0x77, 0x5c, 0x7f, 0xab, 0x4f, 0x4c, 0x74, 0x1f, 0x8a, 0x3b, + 0xae, 0xd3, 0x13, 0xc9, 0xe3, 0x8a, 0x14, 0x7d, 0x3a, 0x2b, 0x13, 0x71, 0xaa, 0x88, 0xb4, 0x8a, + 0x93, 0xd9, 0x37, 0x5c, 0xa7, 0x87, 0x39, 0x3b, 0x7a, 0x17, 0x0a, 0xbe, 0xc3, 0xc5, 0xcf, 0xac, + 0x5d, 0xcf, 0x02, 0x59, 0x77, 0x4c, 0xa3, 0x9b, 0x46, 0x2a, 0xb3, 0x9c, 0xd6, 0x76, 0x70, 0xc1, + 0x77, 0x50, 0x17, 0x66, 0x29, 0x57, 0x6b, 0xd3, 0xe9, 0x52, 0xf3, 0x40, 0xa4, 0x83, 0xb5, 0x1c, + 0xc7, 0xad, 0x6d, 0x58, 0x2d, 0x89, 0xb3, 0x71, 0x41, 0xe8, 0x37, 0x2b, 0xaf, 0xe2, 0x04, 0x3a, + 0xba, 0x0b, 0xf3, 0xd4, 0x36, 0xbb, 0x83, 0x4e, 0x1c, 0x9d, 0x2c, 0xf4, 0x2b, 0x8d, 0x57, 0x04, + 0xf3, 0x7c, 0x2b, 0xb9, 0x8d, 0xd3, 0xf4, 0xe8, 0x03, 0x98, 0x77, 0xc3, 0x37, 0x11, 0x3a, 0x97, + 0xb8, 0xce, 0x6f, 0xe5, 0xd3, 0x19, 0x27, 0x99, 0x63, 0xc9, 0xa9, 0x0d, 0x9c, 0x16, 0xa3, 0xfd, + 0x45, 0x81, 0x05, 0xd9, 0x8f, 0xbe, 0xe1, 0x0f, 0x3c, 0xd4, 0x86, 0xb2, 0xc7, 0x7f, 0x09, 0x5f, + 0xde, 0xc8, 0x57, 0x08, 0x02, 0xee, 0xc6, 0x9c, 0x90, 0x5e, 0x0e, 0x9e, 0xb1, 0xc0, 0x42, 0x2d, + 0x28, 0x71, 0xa5, 0x23, 0xdf, 0xe6, 0x0c, 0xff, 0xc6, 0xf4, 0xf0, 0x70, 0x39, 0x28, 0x52, 0x38, + 0x40, 0x08, 0x0b, 0x9e, 0x9a, 0x5d, 0xf0, 0xb4, 0x0f, 0x15, 0x80, 0x38, 0xfb, 0x44, 0x05, 0x54, + 0x19, 0x5b, 0x40, 0xaf, 0x42, 0xd1, 0xa3, 0x4f, 0x03, 0xcd, 0xd4, 0xb8, 0x1c, 0x73, 0xf6, 0x2d, + 0xfa, 0x94, 0x60, 0xbe, 0xcd, 0x4a, 0x77, 0x2f, 0x0a, 0x7d, 
0x35, 0x59, 0xba, 0xe3, 0x38, 0x8f, + 0x69, 0xb4, 0x0e, 0xcc, 0xc5, 0x7a, 0x34, 0x59, 0xcd, 0x78, 0x5d, 0x48, 0x52, 0xb8, 0xa4, 0xb3, + 0xc7, 0x4a, 0x29, 0xe4, 0x90, 0xf2, 0x3b, 0x05, 0xa6, 0x03, 0x31, 0xd4, 0xf3, 0xd1, 0x93, 0x91, + 0x3a, 0xae, 0xe7, 0x73, 0x1f, 0xe3, 0xe6, 0x55, 0x3c, 0xea, 0x5e, 0xc2, 0x15, 0xa9, 0x86, 0x6f, + 0x40, 0x89, 0xfa, 0xa4, 0xe7, 0x55, 0x0b, 0x3c, 0x87, 0xe7, 0x77, 0xe2, 0x59, 0x01, 0x5a, 0x6a, + 0x31, 0x76, 0x1c, 0xa0, 0x68, 0xb7, 0xc4, 0xf1, 0x5b, 0x77, 0x9c, 0xbd, 0x41, 0x5f, 0x44, 0xd4, + 0x15, 0x28, 0x75, 0x59, 0x8c, 0x73, 0x13, 0x57, 0x62, 0x4e, 0x1e, 0xf8, 0x38, 0xd8, 0xd3, 0x7e, + 0x5d, 0x16, 0xb6, 0x8d, 0xb2, 0xf5, 0x29, 0x74, 0x30, 0x2b, 0x50, 0xf4, 0x63, 0xaf, 0x44, 0x27, + 0x89, 0x3b, 0x84, 0xef, 0xa0, 0xab, 0x30, 0x65, 0x3a, 0xb6, 0x4f, 0x6c, 0x9f, 0x6b, 0x3f, 0xdb, + 0x98, 0x19, 0x1e, 0x2e, 0x4f, 0xdd, 0x0b, 0x96, 0x70, 0xb8, 0x87, 0x28, 0x80, 0xe9, 0xd8, 0x1d, + 0xea, 0x53, 0xc7, 0xf6, 0xaa, 0x45, 0x6e, 0xcb, 0x3c, 0xc1, 0x1e, 0xbd, 0xec, 0xbd, 0x90, 0x3b, + 0xd6, 0x38, 0x5a, 0xf2, 0xb0, 0x04, 0x8e, 0xbe, 0x06, 0x67, 0x39, 0x7b, 0xab, 0x43, 0x6c, 0x9f, + 0xfa, 0x07, 0xa2, 0x77, 0x58, 0x14, 0x6c, 0x67, 0x5b, 0xf2, 0x26, 0x4e, 0xd2, 0xa2, 0x1f, 0xc1, + 0x2c, 0x2b, 0xaf, 0xa4, 0x73, 0xaf, 0x6b, 0xd0, 0x5e, 0xd8, 0x2a, 0xdc, 0x9b, 0xb8, 0x72, 0x73, + 0xc5, 0x43, 0x94, 0xfb, 0xb6, 0xef, 0x4a, 0xb9, 0x55, 0xde, 0xc2, 0x09, 0x71, 0xe8, 0x3d, 0x98, + 0x32, 0x5d, 0xc2, 0x7a, 0xf0, 0xea, 0x14, 0x77, 0xe8, 0x97, 0xf2, 0x39, 0xb4, 0x4d, 0x7b, 0x44, + 0x58, 0x3e, 0x60, 0xc7, 0x21, 0x0e, 0x0b, 0x0f, 0xea, 0x79, 0x03, 0xd2, 0x69, 0x1c, 0x54, 0x2b, + 0xb9, 0x0b, 0x43, 0xf4, 0x22, 0x2d, 0xc6, 0xeb, 0x06, 0x75, 0xb1, 0x25, 0x70, 0x70, 0x84, 0x88, + 0xbe, 0x1b, 0xa2, 0xb7, 0x1d, 0xde, 0x1b, 0xcc, 0xac, 0xbd, 0x39, 0x09, 0xfa, 0xd6, 0x80, 0x9f, + 0x3a, 0x19, 0xbe, 0xed, 0xe0, 0x08, 0xb2, 0xf6, 0x0e, 0x2c, 0x8c, 0x18, 0x12, 0x9d, 0x03, 0x75, + 0x8f, 0x88, 0x9b, 0x07, 0x66, 0x3f, 0xd1, 0x05, 0x28, 0xed, 0x1b, 0xdd, 0x81, 0x38, 0xa7, 0x38, + 
0x78, 0xb8, 0x53, 0xb8, 0xa5, 0x68, 0xbf, 0x28, 0xc0, 0x4c, 0xe0, 0x19, 0xdf, 0x25, 0x46, 0xef, + 0x14, 0x42, 0xa6, 0x0d, 0x45, 0xaf, 0x4f, 0x4c, 0x91, 0xf4, 0xd7, 0x72, 0x9f, 0x1c, 0xae, 0x1f, + 0xeb, 0x2b, 0xe2, 0x30, 0x63, 0x4f, 0x98, 0xa3, 0xa1, 0x27, 0x51, 0x85, 0x0a, 0x8a, 0xfb, 0xcd, + 0x09, 0x71, 0x8f, 0xac, 0x54, 0xda, 0xef, 0x15, 0x38, 0x27, 0x51, 0x9f, 0xd6, 0xfd, 0x68, 0xe3, + 0x45, 0x0b, 0x64, 0x9c, 0x5b, 0xa5, 0x22, 0xa9, 0xfd, 0xa6, 0x20, 0x92, 0x6b, 0xf8, 0x16, 0xac, + 0xc2, 0x9f, 0xc2, 0x6b, 0x3c, 0x4e, 0x78, 0xfc, 0xd6, 0x64, 0x9e, 0x89, 0xfb, 0xc9, 0x4c, 0xbf, + 0x6f, 0xa7, 0xfc, 0x7e, 0xe7, 0x85, 0xd0, 0x8f, 0xf6, 0xfe, 0x4f, 0x0a, 0xb0, 0x98, 0xa9, 0x11, + 0xba, 0x06, 0xe5, 0xa0, 0xf5, 0xe3, 0x96, 0xab, 0xc4, 0x08, 0x01, 0x0d, 0x16, 0xbb, 0xc8, 0x02, + 0x70, 0x49, 0xdf, 0xf1, 0xa8, 0xef, 0xb8, 0x07, 0xc2, 0x0e, 0x5f, 0xcd, 0xa1, 0x29, 0x8e, 0x98, + 0x24, 0x33, 0xcc, 0x31, 0x43, 0xc7, 0x3b, 0x58, 0x82, 0x46, 0x8f, 0x99, 0x42, 0x86, 0x45, 0x98, + 0x39, 0xd4, 0x49, 0xc2, 0x4b, 0xc6, 0x8f, 0x5f, 0x82, 0x21, 0x61, 0x81, 0xa8, 0xfd, 0xb6, 0x00, + 0xaf, 0x8c, 0x31, 0x1d, 0xc2, 0x09, 0x43, 0xb0, 0x0e, 0x63, 0x22, 0x37, 0x04, 0x97, 0x8f, 0x94, + 0xd1, 0x68, 0x86, 0xd1, 0x6e, 0xbf, 0x88, 0xd1, 0x84, 0x77, 0x8f, 0x30, 0xdb, 0x93, 0x94, 0xd9, + 0x6e, 0x4e, 0x68, 0xb6, 0xd4, 0xf9, 0x49, 0x19, 0xee, 0x93, 0x62, 0x22, 0xee, 0xc4, 0x0d, 0xf8, + 0xe4, 0xe3, 0xae, 0x03, 0xa5, 0xed, 0xae, 0xb3, 0x1d, 0xb6, 0x66, 0xef, 0x4c, 0xe6, 0x93, 0x40, + 0x4d, 0x9d, 0xdd, 0x1a, 0x45, 0x81, 0x8e, 0xb2, 0x0a, 0x5f, 0xc3, 0x01, 0x38, 0xda, 0x4d, 0xd9, + 0xee, 0xdd, 0x17, 0x12, 0x13, 0x98, 0x2c, 0x90, 0x33, 0xc6, 0x8e, 0xb5, 0x3d, 0x80, 0x58, 0x9b, + 0x8c, 0x2a, 0xf7, 0x40, 0xae, 0x72, 0x13, 0x8c, 0x13, 0xa2, 0x66, 0x5c, 0x2a, 0x8c, 0xb5, 0x1f, + 0x88, 0xba, 0x38, 0x56, 0xda, 0x7a, 0x52, 0xda, 0xdb, 0xb9, 0x93, 0x73, 0xe2, 0x1a, 0x2e, 0xd7, + 0xe2, 0x3f, 0x28, 0xe2, 0x0e, 0x2d, 0x2c, 0x73, 0xf2, 0xcd, 0xfb, 0x56, 0xb2, 0x79, 0x9f, 0x34, + 0x6a, 0xb3, 0x5b, 0xf8, 0x7f, 0x28, 
0x80, 0x24, 0xaa, 0x0d, 0xa3, 0xdf, 0xa7, 0xb6, 0xf5, 0x3f, + 0x57, 0x2e, 0x8f, 0xbb, 0x53, 0xfe, 0xaa, 0x90, 0xf0, 0x16, 0xaf, 0x07, 0x5b, 0x89, 0x71, 0x53, + 0x9c, 0x6c, 0xc4, 0x4d, 0xf3, 0x35, 0x01, 0xb2, 0xd8, 0xcc, 0x22, 0xc2, 0xd9, 0xbc, 0xe8, 0x3d, + 0x28, 0xfa, 0x86, 0x15, 0xfa, 0xa8, 0x3e, 0xe1, 0x04, 0x40, 0xba, 0x94, 0x18, 0x96, 0x87, 0x39, + 0x14, 0xb2, 0x61, 0xb6, 0x2b, 0x5d, 0xb0, 0x26, 0xed, 0x99, 0xe4, 0xcb, 0x59, 0xdc, 0xb6, 0xcb, + 0xab, 0x38, 0x81, 0xaf, 0xfd, 0x32, 0xd9, 0x79, 0x88, 0xa2, 0x71, 0x22, 0xd6, 0x7a, 0x94, 0xb0, + 0x56, 0x9e, 0x57, 0x62, 0xd7, 0xfe, 0x4e, 0xdb, 0xb0, 0xee, 0xef, 0x13, 0xdb, 0x67, 0x41, 0x92, + 0x69, 0x32, 0x02, 0x97, 0xfa, 0x83, 0xed, 0x2e, 0x35, 0x33, 0xb5, 0x11, 0xa7, 0xe4, 0x8a, 0x60, + 0xbc, 0xb4, 0x39, 0x9e, 0x14, 0x1f, 0x85, 0xa3, 0x7d, 0x5c, 0x0c, 0x6f, 0xb1, 0xdc, 0x52, 0x6d, + 0xe3, 0x34, 0x02, 0xe7, 0x9b, 0xc1, 0x49, 0x0f, 0xc2, 0x66, 0xe2, 0x03, 0x36, 0x95, 0xf8, 0xb6, + 0xb0, 0x06, 0x20, 0xbe, 0x93, 0x50, 0xc7, 0xe6, 0x66, 0x51, 0x63, 0xe9, 0x0f, 0xa2, 0x1d, 0x2c, + 0x51, 0xa1, 0xdd, 0x8c, 0xcb, 0xef, 0xcd, 0x7c, 0x6a, 0x70, 0xa7, 0xe5, 0xbf, 0xfb, 0x46, 0x29, + 0xa2, 0xf4, 0x52, 0x52, 0x44, 0x3a, 0x8e, 0xca, 0x27, 0x1c, 0x47, 0x7f, 0x4a, 0xa6, 0xd6, 0xb6, + 0x61, 0x9d, 0x42, 0x91, 0x78, 0x94, 0x2c, 0x12, 0xab, 0x93, 0x15, 0x89, 0xb6, 0x61, 0x8d, 0xa9, + 0x13, 0xff, 0x54, 0x60, 0x61, 0x24, 0xf6, 0xc2, 0xac, 0xab, 0x8c, 0xf9, 0x74, 0xb5, 0x99, 0x54, + 0xe6, 0xcb, 0x13, 0x9c, 0x92, 0x6c, 0x35, 0x52, 0x87, 0x4f, 0x3d, 0xb9, 0xc3, 0xa7, 0x7d, 0xa4, + 0xc2, 0x85, 0xac, 0x8e, 0xfe, 0x65, 0x0d, 0xca, 0xd3, 0x63, 0xee, 0xc2, 0x69, 0x8f, 0xb9, 0xd5, + 0xff, 0x7c, 0xcc, 0x5d, 0x3c, 0x9d, 0x31, 0xf7, 0x47, 0x05, 0xb8, 0x98, 0x7d, 0x4f, 0x38, 0xa1, + 0x59, 0x77, 0x7c, 0xc3, 0x28, 0xbc, 0xfc, 0x1b, 0x06, 0xba, 0x03, 0x73, 0x46, 0x27, 0x38, 0x66, + 0x46, 0x97, 0x15, 0x2d, 0x7e, 0x8e, 0xa7, 0x1b, 0x68, 0x78, 0xb8, 0x3c, 0x77, 0x37, 0xb1, 0x83, + 0x53, 0x94, 0xda, 0x67, 0x2a, 0xa0, 0xd1, 0x29, 0x22, 0xba, 0x23, 0x26, 
0x9b, 0x41, 0x20, 0x5e, + 0x93, 0x27, 0x9b, 0x5f, 0x1c, 0x2e, 0x5f, 0x1c, 0xe5, 0x90, 0x66, 0x9e, 0xeb, 0x91, 0x09, 0x83, + 0xb9, 0xe8, 0xcd, 0xa4, 0x51, 0xbe, 0x38, 0x5c, 0xce, 0xf8, 0xcb, 0x80, 0x1e, 0x21, 0xa5, 0x4c, + 0x67, 0xc1, 0xd9, 0xae, 0xe1, 0xf9, 0x9b, 0xae, 0xb3, 0x4d, 0xda, 0x54, 0x7c, 0xf7, 0x9e, 0x6c, + 0xf2, 0x17, 0xcd, 0x36, 0xd7, 0x65, 0x20, 0x9c, 0xc4, 0x45, 0xfb, 0x80, 0xd8, 0x42, 0xdb, 0x35, + 0x6c, 0x2f, 0x78, 0x25, 0x26, 0xad, 0x38, 0xb1, 0xb4, 0x9a, 0x90, 0x86, 0xd6, 0x47, 0xd0, 0x70, + 0x86, 0x04, 0x74, 0x0d, 0xca, 0x2e, 0x31, 0x3c, 0xc7, 0x16, 0x93, 0xd8, 0xc8, 0xcb, 0x98, 0xaf, + 0x62, 0xb1, 0x8b, 0xfe, 0x1f, 0xa6, 0x7a, 0xc4, 0xf3, 0x58, 0xf9, 0x2a, 0x73, 0xc2, 0x79, 0x41, + 0x38, 0xb5, 0x11, 0x2c, 0xe3, 0x70, 0x5f, 0xfb, 0x50, 0x81, 0xd8, 0x45, 0xbc, 0xea, 0x52, 0xf3, + 0x7e, 0x30, 0xc1, 0xbd, 0x05, 0xb3, 0x8e, 0x6b, 0x19, 0x36, 0x7d, 0x1a, 0x94, 0xe8, 0xc0, 0xc1, + 0x51, 0xc4, 0x3f, 0x94, 0xf6, 0x70, 0x82, 0x92, 0x95, 0x76, 0xd3, 0xe9, 0xf5, 0x1c, 0x9b, 0x65, + 0x6d, 0xe1, 0x5a, 0x29, 0xe7, 0x85, 0x3b, 0x58, 0xa2, 0xd2, 0x3e, 0x51, 0x60, 0x3e, 0x35, 0x2b, + 0x45, 0x3f, 0x57, 0xe0, 0xa2, 0x97, 0xa9, 0x9c, 0x08, 0xb9, 0xdb, 0x93, 0x8c, 0x48, 0x13, 0x00, + 0x8d, 0x25, 0xa1, 0xcf, 0x98, 0xb7, 0xc7, 0x63, 0x04, 0x6b, 0x7f, 0x55, 0xe0, 0x5c, 0x7a, 0xea, + 0xfa, 0xdf, 0xa8, 0x28, 0x7a, 0x0b, 0x66, 0x82, 0xfe, 0xf1, 0x5b, 0xe4, 0xa0, 0xd5, 0x14, 0x5e, + 0x38, 0x2f, 0xc0, 0x66, 0x36, 0xe3, 0x2d, 0x2c, 0xd3, 0x69, 0x3f, 0x2d, 0x40, 0x25, 0xac, 0x58, + 0xe8, 0x3b, 0xf1, 0x14, 0x5d, 0x99, 0xf8, 0x74, 0x47, 0x87, 0x6e, 0x64, 0x92, 0xfe, 0xf2, 0xff, + 0xce, 0x71, 0x25, 0x6c, 0xd7, 0x82, 0xf6, 0x3a, 0xbb, 0x09, 0x4b, 0x76, 0x9c, 0xc5, 0x3c, 0x1d, + 0xa7, 0xf6, 0xb1, 0x0a, 0x0b, 0x23, 0x05, 0x1c, 0xdd, 0x4e, 0xe4, 0xbc, 0xab, 0xa9, 0x9c, 0xb7, + 0x38, 0xc2, 0x70, 0x62, 0x29, 0x2f, 0x3b, 0x13, 0xa9, 0xa7, 0x98, 0x89, 0x8a, 0x79, 0x33, 0x51, + 0xe9, 0xe8, 0x4c, 0x94, 0xf2, 0x4e, 0x39, 0x97, 0x77, 0xfa, 0x30, 0x9f, 0xea, 0x48, 0xd0, 0x0d, + 0xa8, 0x50, 
0xdb, 0x23, 0xe6, 0xc0, 0x25, 0x62, 0xd6, 0x1a, 0xb5, 0xac, 0x2d, 0xb1, 0x8e, 0x23, + 0x0a, 0x54, 0x87, 0x69, 0xcf, 0xdc, 0x25, 0x9d, 0x41, 0x97, 0x74, 0xb8, 0x43, 0x2a, 0xf1, 0x17, + 0xd3, 0xad, 0x70, 0x03, 0xc7, 0x34, 0xda, 0xbf, 0x8a, 0x30, 0x2b, 0x37, 0x14, 0x39, 0x3e, 0x11, + 0xbf, 0x0f, 0x33, 0x86, 0x6d, 0x3b, 0xbe, 0x11, 0x34, 0x8e, 0x85, 0xdc, 0xc3, 0x2f, 0x59, 0x8e, + 0x7e, 0x37, 0x86, 0x08, 0x86, 0x5f, 0x51, 0x28, 0x4b, 0x3b, 0x58, 0x96, 0x84, 0xee, 0x8a, 0x6e, + 0x51, 0xcd, 0xdf, 0x2d, 0x56, 0x52, 0x9d, 0x62, 0x1d, 0xa6, 0xa3, 0x8e, 0x48, 0xfc, 0x39, 0x21, + 0xb2, 0x4f, 0x1c, 0x93, 0x31, 0x0d, 0xd2, 0x13, 0x5e, 0x2c, 0x71, 0x2f, 0xce, 0x1d, 0x71, 0xa3, + 0x4b, 0xb7, 0xa2, 0xe5, 0x13, 0x6d, 0x45, 0x33, 0xfa, 0xc8, 0xa9, 0x53, 0xe9, 0x23, 0x6b, 0x5f, + 0x87, 0x73, 0x69, 0x0f, 0x4e, 0xf4, 0xf9, 0x6d, 0x13, 0xd0, 0xa8, 0xfc, 0xe3, 0x7a, 0xaf, 0x51, + 0x8e, 0x38, 0x11, 0x35, 0xae, 0x3f, 0x7b, 0xbe, 0x74, 0xe6, 0xd3, 0xe7, 0x4b, 0x67, 0x3e, 0x7f, + 0xbe, 0x74, 0xe6, 0xc7, 0xc3, 0x25, 0xe5, 0xd9, 0x70, 0x49, 0xf9, 0x74, 0xb8, 0xa4, 0x7c, 0x3e, + 0x5c, 0x52, 0xfe, 0x36, 0x5c, 0x52, 0x7e, 0xf6, 0xf7, 0xa5, 0x33, 0x8f, 0x0b, 0xfb, 0xab, 0xff, + 0x0e, 0x00, 0x00, 0xff, 0xff, 0x09, 0x41, 0xe3, 0x7d, 0x3c, 0x2a, 0x00, 0x00, } diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto index bbbfa5df7d40..417a6c1bb29d 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.proto +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -68,6 +68,20 @@ message Image { optional string dockerImageConfig = 10; } +// ImageBlobReferences describes the blob references within an image. +message ImageBlobReferences { + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. 
+ // +optional + repeated string layers = 1; + + // manifest, if set, is the blob that contains the image manifest. Some images do + // not have separate manifest blobs and this field will be set to nil if so. + // +optional + optional string manifest = 2; +} + // ImageImportSpec describes a request to import a specific image. message ImageImportSpec { // From is the source of an image to import; only kind DockerImage is allowed @@ -110,6 +124,16 @@ message ImageLayer { optional string mediaType = 3; } +// ImageLayerData contains metadata about an image layer. +message ImageLayerData { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + optional int64 size = 1; + + // MediaType of the referenced object. + optional string mediaType = 2; +} + // ImageList is a list of Image objects. message ImageList { // Standard object's metadata. @@ -237,6 +261,20 @@ message ImageStreamImportStatus { repeated ImageImportStatus images = 3; } +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +message ImageStreamLayers { + // Standard object's metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // blobs is a map of blob name to metadata about the blob. + map blobs = 2; + + // images is a map between an image name and the names of the blobs and manifests that + // comprise the image. + map images = 3; +} + // ImageStreamList is a list of ImageStream objects. message ImageStreamList { // Standard object's metadata. 
diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go index 5377b4cade6d..46f785c471aa 100644 --- a/vendor/github.com/openshift/api/image/v1/register.go +++ b/vendor/github.com/openshift/api/image/v1/register.go @@ -43,6 +43,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageStreamTag{}, &ImageStreamTagList{}, &ImageStreamImage{}, + &ImageStreamLayers{}, &ImageStreamImport{}, &corev1.SecretList{}, ) diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go index 2d268174e723..9887fe542a88 100644 --- a/vendor/github.com/openshift/api/image/v1/types.go +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -159,6 +159,7 @@ type ImageStreamList struct { // +genclient // +genclient:method=Secrets,verb=get,subresource=secrets,result=k8s.io/api/core/v1.SecretList +// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ImageStream stores a mapping of tags to images, metadata overrides that are applied @@ -417,6 +418,43 @@ type DockerImageReference struct { ID string `protobuf:"bytes,5,opt,name=iD"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +type ImageStreamLayers struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // blobs is a map of blob name to metadata about the blob. + Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"` + // images is a map between an image name and the names of the blobs and manifests that + // comprise the image. 
+ Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"` +} + +// ImageBlobReferences describes the blob references within an image. +type ImageBlobReferences struct { + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"` + // manifest, if set, is the blob that contains the image manifest. Some images do + // not have separate manifest blobs and this field will be set to nil if so. + // +optional + Manifest *string `json:"manifest" protobuf:"bytes,2,opt,name=manifest"` +} + +// ImageLayerData contains metadata about an image layer. +type ImageLayerData struct { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"` + // MediaType of the referenced object. + MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` +} + // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/image/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/types_swagger_doc_generated.go index 43f9d8951bcb..dc9b83c49779 100644 --- a/vendor/github.com/openshift/api/image/v1/types_swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/image/v1/types_swagger_doc_generated.go @@ -42,6 +42,16 @@ func (Image) SwaggerDoc() map[string]string { return map_Image } +var map_ImageBlobReferences = map[string]string{ + "": "ImageBlobReferences describes the blob references within an image.", + "layers": "layers is the list of blobs that compose this image, from base layer to top layer. 
All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "manifest": "manifest, if set, is the blob that contains the image manifest. Some images do not have separate manifest blobs and this field will be set to nil if so.", +} + +func (ImageBlobReferences) SwaggerDoc() map[string]string { + return map_ImageBlobReferences +} + var map_ImageImportSpec = map[string]string{ "": "ImageImportSpec describes a request to import a specific image.", "from": "From is the source of an image to import; only kind DockerImage is allowed", @@ -77,6 +87,16 @@ func (ImageLayer) SwaggerDoc() map[string]string { return map_ImageLayer } +var map_ImageLayerData = map[string]string{ + "": "ImageLayerData contains metadata about an image layer.", + "size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", + "mediaType": "MediaType of the referenced object.", +} + +func (ImageLayerData) SwaggerDoc() map[string]string { + return map_ImageLayerData +} + var map_ImageList = map[string]string{ "": "ImageList is a list of Image objects.", "metadata": "Standard object's metadata.", @@ -167,6 +187,17 @@ func (ImageStreamImportStatus) SwaggerDoc() map[string]string { return map_ImageStreamImportStatus } +var map_ImageStreamLayers = map[string]string{ + "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.", + "metadata": "Standard object's metadata.", + "blobs": "blobs is a map of blob name to metadata about the blob.", + "images": "images is a map between an image name and the names of the blobs and manifests that comprise the image.", +} + +func (ImageStreamLayers) SwaggerDoc() map[string]string { + return map_ImageStreamLayers +} + var map_ImageStreamList = map[string]string{ "": "ImageStreamList is a list of ImageStream objects.", "metadata": "Standard object's metadata.", diff --git 
a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go index fd84d5c30817..6aa13046be40 100644 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go @@ -75,6 +75,36 @@ func (in *Image) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Manifest != nil { + in, out := &in.Manifest, &out.Manifest + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. +func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { *out = *in @@ -145,6 +175,31 @@ func (in *ImageLayer) DeepCopy() *ImageLayer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. 
+func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageList) DeepCopyInto(out *ImageList) { *out = *in @@ -421,6 +476,50 @@ func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + newVal := new(ImageLayerData) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + newVal := new(ImageBlobReferences) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { *out = *in diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index 046d6e411796..9991bea1ac0e 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: 91748cede774246187f6a8627e0a335015261f8e28708731a279c6ce205d9254 -updated: 2018-06-29T15:11:35.519708105-04:00 +updated: 2018-07-10T15:41:29.920958699-04:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -49,7 +49,7 @@ imports: - name: github.com/modern-go/reflect2 version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd - name: github.com/openshift/api - version: 31a7bbd2266d178da3c12bb83f5274d387f775e6 + version: 04a26bf3b8d69c390642c5803fe4cfdb899112aa subpackages: - apps/v1 - authorization/v1 diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go index 15d18fa8888d..b193c7d793bf 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go @@ -134,3 +134,14 @@ func (c *FakeImageStreams) Secrets(imageStreamName string, options v1.GetOptions } return obj.(*core_v1.SecretList), err } + +// Layers takes name of the imageStream, and returns the corresponding imageStreamLayers object, and an error if there is any. +func (c *FakeImageStreams) Layers(imageStreamName string, options v1.GetOptions) (result *image_v1.ImageStreamLayers, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "layers", imageStreamName), &image_v1.ImageStreamLayers{}) + + if obj == nil { + return nil, err + } + return obj.(*image_v1.ImageStreamLayers), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go index 1e1cc2e24f9d..307afcfff68e 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go @@ -30,6 +30,7 @@ type ImageStreamInterface interface { Watch(opts meta_v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ImageStream, err error) Secrets(imageStreamName string, options meta_v1.GetOptions) (*core_v1.SecretList, error) + Layers(imageStreamName string, options meta_v1.GetOptions) (*v1.ImageStreamLayers, error) ImageStreamExpansion } @@ -173,3 +174,17 @@ func (c *imageStreams) Secrets(imageStreamName string, options meta_v1.GetOption Into(result) return } + +// Layers takes name of the imageStream, and returns the corresponding v1.ImageStreamLayers object, and an error if there is any. +func (c *imageStreams) Layers(imageStreamName string, options meta_v1.GetOptions) (result *v1.ImageStreamLayers, err error) { + result = &v1.ImageStreamLayers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + Name(imageStreamName). + SubResource("layers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/imagebuilder/cmd/imagebuilder/imagebuilder.go b/vendor/github.com/openshift/imagebuilder/cmd/imagebuilder/imagebuilder.go index 62a932c27049..499e466f987b 100644 --- a/vendor/github.com/openshift/imagebuilder/cmd/imagebuilder/imagebuilder.go +++ b/vendor/github.com/openshift/imagebuilder/cmd/imagebuilder/imagebuilder.go @@ -82,7 +82,7 @@ func main() { if glog.V(2) { log.Printf("Builder: "+format, args...) } else { - fmt.Fprintf(options.ErrOut, "--> %s\n", fmt.Sprintf(format, args...)) + fmt.Fprintf(options.Out, "--> %s\n", fmt.Sprintf(format, args...)) } } diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/archive.go b/vendor/github.com/openshift/imagebuilder/dockerclient/archive.go index ee5ca9a30e51..c8c0f68bc92f 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerclient/archive.go +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/archive.go @@ -38,7 +38,9 @@ func FilterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error { } var body io.Reader = tr + name := h.Name data, ok, skip, err := fn(h, tr) + glog.V(6).Infof("Transform %s -> %s: data=%t ok=%t skip=%t err=%v", name, h.Name, data != nil, ok, skip, err) if err != nil { return err } @@ -100,7 +102,7 @@ func NewLazyArchive(fn CreateFileFunc) io.ReadCloser { return pr } -func archiveFromURL(src, dst, tempDir string) (io.Reader, io.Closer, error) { +func archiveFromURL(src, dst, tempDir string, check DirectoryCheck) (io.Reader, io.Closer, error) { // get filename from URL u, err := url.Parse(src) if err != nil { @@ -151,7 +153,7 @@ func archiveFromURL(src, dst, tempDir string) (io.Reader, io.Closer, error) { return archive, closers{resp.Body.Close, archive.Close}, nil } -func archiveFromDisk(directory string, src, dst string, allowDownload bool, excludes []string) (io.Reader, io.Closer, error) { +func archiveFromDisk(directory string, src, dst string, allowDownload bool, excludes []string, check 
DirectoryCheck) (io.Reader, io.Closer, error) { var err error if filepath.IsAbs(src) { src, err = filepath.Rel(filepath.Dir(src), src) @@ -173,13 +175,66 @@ func archiveFromDisk(directory string, src, dst string, allowDownload bool, excl directory = filepath.Dir(directory) } - options := archiveOptionsFor(infos, dst, excludes) + options, err := archiveOptionsFor(infos, dst, excludes, check) + if err != nil { + return nil, nil, err + } glog.V(4).Infof("Tar of %s %#v", directory, options) rc, err := archive.TarWithOptions(directory, options) return rc, rc, err } +func archiveFromFile(file string, src, dst string, excludes []string, check DirectoryCheck) (io.Reader, io.Closer, error) { + var err error + if filepath.IsAbs(src) { + src, err = filepath.Rel(filepath.Dir(src), src) + if err != nil { + return nil, nil, err + } + } + + mapper, _, err := newArchiveMapper(src, dst, excludes, true, check) + if err != nil { + return nil, nil, err + } + + f, err := os.Open(file) + if err != nil { + return nil, nil, err + } + + r, err := transformArchive(f, true, mapper.Filter) + return r, f, err +} + +func archiveFromContainer(in io.Reader, src, dst string, excludes []string, check DirectoryCheck) (io.Reader, string, error) { + mapper, archiveRoot, err := newArchiveMapper(src, dst, excludes, false, check) + if err != nil { + return nil, "", err + } + + r, err := transformArchive(in, false, mapper.Filter) + return r, archiveRoot, err +} + +func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) { + pr, pw := io.Pipe() + go func() { + if compressed { + in, err := archive.DecompressStream(r) + if err != nil { + pw.CloseWithError(err) + return + } + r = in + } + err := FilterArchive(r, pw, fn) + pw.CloseWithError(err) + }() + return pr, nil +} + // * -> test // a (dir) -> test // a (file) -> test @@ -193,9 +248,15 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is } pattern := filepath.Base(srcPattern) + 
glog.V(6).Infof("creating mapper for srcPattern=%s pattern=%s dst=%s isDestDir=%t", srcPattern, pattern, dst, isDestDir) + // no wildcards if !containsWildcards(pattern) { return func(name string, isDir bool) (string, bool) { + // when extracting from the working directory, Docker prefaces with ./ + if strings.HasPrefix(name, "."+string(filepath.Separator)) { + name = name[2:] + } if name == srcPattern { if isDir { return "", false @@ -232,7 +293,7 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is } prefix += string(filepath.Separator) - // nested with pattern pattern + // nested with pattern return func(name string, isDir bool) (string, bool) { remainder := strings.TrimPrefix(name, prefix) if remainder == name { @@ -251,56 +312,6 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is } } -func archiveFromFile(file string, src, dst string, excludes []string) (io.Reader, io.Closer, error) { - var err error - if filepath.IsAbs(src) { - src, err = filepath.Rel(filepath.Dir(src), src) - if err != nil { - return nil, nil, err - } - } - - mapper, _, err := newArchiveMapper(src, dst, excludes, true) - if err != nil { - return nil, nil, err - } - - f, err := os.Open(file) - if err != nil { - return nil, nil, err - } - - r, err := transformArchive(f, true, mapper.Filter) - return r, f, err -} - -func archiveFromContainer(in io.Reader, src, dst string, excludes []string) (io.Reader, string, error) { - mapper, archiveRoot, err := newArchiveMapper(src, dst, excludes, false) - if err != nil { - return nil, "", err - } - - r, err := transformArchive(in, false, mapper.Filter) - return r, archiveRoot, err -} - -func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) { - pr, pw := io.Pipe() - go func() { - if compressed { - in, err := archive.DecompressStream(r) - if err != nil { - pw.CloseWithError(err) - return - } - r = in - } - err := FilterArchive(r, pw, fn) - 
pw.CloseWithError(err) - }() - return pr, nil -} - type archiveMapper struct { exclude *fileutils.PatternMatcher rename func(name string, isDir bool) (string, bool) @@ -308,7 +319,7 @@ type archiveMapper struct { resetOwners bool } -func newArchiveMapper(src, dst string, excludes []string, resetOwners bool) (*archiveMapper, string, error) { +func newArchiveMapper(src, dst string, excludes []string, resetOwners bool, check DirectoryCheck) (*archiveMapper, string, error) { ex, err := fileutils.NewPatternMatcher(excludes) if err != nil { return nil, "", err @@ -316,6 +327,13 @@ func newArchiveMapper(src, dst string, excludes []string, resetOwners bool) (*ar isDestDir := strings.HasSuffix(dst, "/") || path.Base(dst) == "." dst = path.Clean(dst) + if !isDestDir && check != nil { + isDir, err := check.IsDirectory(dst) + if err != nil { + return nil, "", err + } + isDestDir = isDir + } var prefix string archiveRoot := src @@ -380,19 +398,27 @@ func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool, return nil, false, false, nil } -func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string) *archive.TarOptions { +func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string, check DirectoryCheck) (*archive.TarOptions, error) { dst = trimLeadingPath(dst) dstIsDir := strings.HasSuffix(dst, "/") || dst == "." || dst == "/" || strings.HasSuffix(dst, "/.") dst = trimTrailingSlash(dst) dstIsRoot := dst == "." 
|| dst == "/" + if !dstIsDir && check != nil { + isDir, err := check.IsDirectory(dst) + if err != nil { + return nil, fmt.Errorf("unable to check whether %s is a directory: %v", dst, err) + } + dstIsDir = isDir + } + options := &archive.TarOptions{ ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, } pm, err := fileutils.NewPatternMatcher(excludes) if err != nil { - return options + return options, nil } for _, info := range infos { @@ -418,12 +444,9 @@ func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string) *archive case len(infos) > 1: // put each input into the target, which is assumed to be a directory ([Dockerfile, dir] -> [a/Dockerfile, a/dir]) options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath)) - case info.FileInfo.IsDir() && dstIsDir: - // mapping a directory to an explicit directory ([dir] -> [a]) - options.RebaseNames[infoPath] = dst case info.FileInfo.IsDir(): - // mapping a directory to an implicit directory ([Dockerfile] -> [dir/Dockerfile]) - options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath)) + // mapping a directory to a destination, explicit or not ([dir] -> [a]) + options.RebaseNames[infoPath] = dst case info.FromDir: // this is a file that was part of an explicit directory request, no transformation options.RebaseNames[infoPath] = path.Join(dst, path.Base(infoPath)) @@ -437,7 +460,7 @@ func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string) *archive } options.ExcludePatterns = excludes - return options + return options, nil } func sourceToDestinationName(src, dst string, forceDir bool) string { diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/archive_test.go b/vendor/github.com/openshift/imagebuilder/dockerclient/archive_test.go index 89961c769b63..978c8564bfd3 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerclient/archive_test.go +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/archive_test.go @@ -13,6 +13,20 @@ import ( 
"github.com/docker/docker/pkg/archive" ) +type testDirectoryCheck map[string]bool + +func (c testDirectoryCheck) IsDirectory(path string) (bool, error) { + if c == nil { + return false, nil + } + + isDir, ok := c[path] + if !ok { + return false, fmt.Errorf("no path defined for %s", path) + } + return isDir, nil +} + type archiveGenerator struct { Headers []*tar.Header } @@ -81,6 +95,7 @@ func Test_archiveFromFile(t *testing.T) { dst string excludes []string expect []string + check map[string]bool }{ { file: testArchive, @@ -233,6 +248,7 @@ func Test_archiveFromFile(t *testing.T) { testCase.src, testCase.dst, testCase.excludes, + testDirectoryCheck(testCase.check), ) if err != nil { t.Fatal(err) @@ -266,6 +282,7 @@ func Test_archiveFromContainer(t *testing.T) { excludes []string expect []string path string + check map[string]bool }{ { gen: newArchiveGenerator().File("file").Dir("test").File("test/file2"), @@ -394,6 +411,14 @@ func Test_archiveFromContainer(t *testing.T) { path: "/a", expect: nil, }, + { + gen: newArchiveGenerator().File("b"), + src: "/a/b", + dst: "/a", + check: map[string]bool{"/a": true}, + path: "/a", + expect: nil, + }, { gen: newArchiveGenerator().Dir("a/").File("a/b"), src: "/a/b", @@ -403,6 +428,13 @@ func Test_archiveFromContainer(t *testing.T) { "/a", }, }, + { + gen: newArchiveGenerator().Dir("./a").File("./a/b"), + src: "a", + dst: "/a", + path: ".", + expect: []string{"/a/b"}, + }, } for i := range testCases { testCase := testCases[i] @@ -412,6 +444,7 @@ func Test_archiveFromContainer(t *testing.T) { testCase.src, testCase.dst, testCase.excludes, + testDirectoryCheck(testCase.check), ) if err != nil { t.Fatal(err) diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/client.go b/vendor/github.com/openshift/imagebuilder/dockerclient/client.go index bbf1ab0ec2ae..fa8b4a8fa2be 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerclient/client.go +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/client.go @@ 
-799,8 +799,9 @@ func (e *ClientExecutor) archiveFromContainer(from string, src, dst string) (io. e.Deferred = append([]func() error{func() error { return e.removeContainer(containerID) }}, e.Deferred...) } + check := newDirectoryCheck(e.Client, e.Container.ID) pr, pw := io.Pipe() - ar, archiveRoot, err := archiveFromContainer(pr, src, dst, nil) + ar, archiveRoot, err := archiveFromContainer(pr, src, dst, nil, check) if err != nil { pr.Close() return nil, nil, err @@ -818,26 +819,30 @@ func (e *ClientExecutor) archiveFromContainer(from string, src, dst string) (io. // TODO: this does not support decompressing nested archives for ADD (when the source is a compressed file) func (e *ClientExecutor) Archive(fromFS bool, src, dst string, allowDownload bool, excludes []string) (io.Reader, io.Closer, error) { + var check DirectoryCheck + if e.Container != nil { + check = newDirectoryCheck(e.Client, e.Container.ID) + } if isURL(src) { if !allowDownload { return nil, nil, fmt.Errorf("source can't be a URL") } glog.V(5).Infof("Archiving %s -> %s from URL", src, dst) - return archiveFromURL(src, dst, e.TempDir) + return archiveFromURL(src, dst, e.TempDir, check) } // the input is from the filesystem, use the source as the input if fromFS { glog.V(5).Infof("Archiving %s %s -> %s from a filesystem location", src, ".", dst) - return archiveFromDisk(src, ".", dst, allowDownload, excludes) + return archiveFromDisk(src, ".", dst, allowDownload, excludes, check) } // if the context is in archive form, read from it without decompressing if len(e.ContextArchive) > 0 { glog.V(5).Infof("Archiving %s %s -> %s from context archive", e.ContextArchive, src, dst) - return archiveFromFile(e.ContextArchive, src, dst, excludes) + return archiveFromFile(e.ContextArchive, src, dst, excludes, check) } // if the context is a directory, we only allow relative includes glog.V(5).Infof("Archiving %q %q -> %q from disk", e.Directory, src, dst) - return archiveFromDisk(e.Directory, src, dst, 
allowDownload, excludes) + return archiveFromDisk(e.Directory, src, dst, allowDownload, excludes, check) } // ContainerVolumeTracker manages tracking archives of specific paths inside a container. diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/conformance_test.go b/vendor/github.com/openshift/imagebuilder/dockerclient/conformance_test.go index 4a5c8fc9d510..911083ada297 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerclient/conformance_test.go +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/conformance_test.go @@ -111,6 +111,8 @@ func TestCopyFrom(t *testing.T) { {name: "copy file to deeper directory with explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a/1 /a/b/c/", expect: "ls -al /a/b/c/1 && ! ls -al /a/b/1"}, {name: "copy file to deeper directory without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a/1 /a/b/c", expect: "ls -al /a/b/c && ! ls -al /a/b/1"}, {name: "copy directory to deeper directory without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "/a /a/b/c", expect: "ls -al /a/b/c/1 && ! ls -al /a/b/1"}, + {name: "copy directory to root without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "a /a", expect: "ls -al /a/1 && ! ls -al /a/a"}, + {name: "copy directory trailing to root without explicit slash", create: "mkdir -p /a && touch /a/1", copy: "a/. /a", expect: "ls -al /a/1 && ! 
ls -al /a/a"}, } for i, testCase := range testCases { name := fmt.Sprintf("%d", i) @@ -143,6 +145,7 @@ func TestCopyFrom(t *testing.T) { stages := imagebuilder.NewStages(node, b) if _, err := e.Stages(b, stages, ""); err != nil { + t.Log(out.String()) t.Fatal(err) } }) @@ -250,6 +253,18 @@ func TestConformanceInternal(t *testing.T) { Name: "directory", ContextDir: "testdata/dir", }, + { + Name: "copy to dir", + ContextDir: "testdata/copy", + }, + { + Name: "copy dir", + ContextDir: "testdata/copydir", + }, + { + Name: "copy to renamed file", + ContextDir: "testdata/copyrename", + }, { Name: "directory with slash", ContextDir: "testdata/overlapdir", @@ -391,8 +406,8 @@ func TestTransientMount(t *testing.T) { e.AllowPull = true e.Directory = "testdata" e.TransientMounts = []Mount{ - {SourcePath: "dir", DestinationPath: "/mountdir"}, - {SourcePath: "Dockerfile.env", DestinationPath: "/mountfile"}, + {SourcePath: "testdata/dir", DestinationPath: "/mountdir"}, + {SourcePath: "testdata/Dockerfile.env", DestinationPath: "/mountfile"}, } e.Tag = fmt.Sprintf("conformance%d", rand.Int63()) diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/copyinfo_test.go b/vendor/github.com/openshift/imagebuilder/dockerclient/copyinfo_test.go index 7828e3511850..24854441b945 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerclient/copyinfo_test.go +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/copyinfo_test.go @@ -17,6 +17,7 @@ func TestCalcCopyInfo(t *testing.T) { paths map[string]struct{} excludes []string rebaseNames map[string]string + check map[string]bool }{ { origPath: "subdir/*", @@ -106,6 +107,33 @@ func TestCalcCopyInfo(t *testing.T) { "Dockerfile": "copy/Dockerfile", }, }, + { + origPath: "Dockerfile", + dstPath: "copy", + rootPath: "testdata/singlefile", + allowWildcards: true, + errFn: nilErr, + paths: map[string]struct{}{ + "Dockerfile": {}, + }, + rebaseNames: map[string]string{ + "Dockerfile": "copy", + }, + }, + { + origPath: 
"Dockerfile", + dstPath: "copy", + check: map[string]bool{"copy": true}, + rootPath: "testdata/singlefile", + allowWildcards: true, + errFn: nilErr, + paths: map[string]struct{}{ + "Dockerfile": {}, + }, + rebaseNames: map[string]string{ + "Dockerfile": "copy/Dockerfile", + }, + }, { origPath: "existing/", dstPath: ".", @@ -185,6 +213,20 @@ func TestCalcCopyInfo(t *testing.T) { "subdir": "test", }, }, + { + origPath: "dir", + dstPath: "/dir", + check: map[string]bool{"dir": false}, + rootPath: "testdata/copydir", + allowWildcards: true, + errFn: nilErr, + paths: map[string]struct{}{ + "dir": {}, + }, + rebaseNames: map[string]string{ + "dir": "dir", + }, + }, } for i, test := range tests { @@ -211,7 +253,10 @@ func TestCalcCopyInfo(t *testing.T) { t.Errorf("did not see paths: %#v", expect) } - options := archiveOptionsFor(infos, test.dstPath, test.excludes) + options, err := archiveOptionsFor(infos, test.dstPath, test.excludes, testDirectoryCheck(test.check)) + if err != nil { + t.Fatal(err) + } if !reflect.DeepEqual(test.rebaseNames, options.RebaseNames) { t.Errorf("rebase names did not match:\n%#v\n%#v", test.rebaseNames, options.RebaseNames) } diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/directory.go b/vendor/github.com/openshift/imagebuilder/dockerclient/directory.go new file mode 100644 index 000000000000..4e0a9f8bf01a --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/directory.go @@ -0,0 +1,87 @@ +package dockerclient + +import ( + "archive/tar" + "context" + "io" + "io/ioutil" + + "github.com/golang/glog" + + docker "github.com/fsouza/go-dockerclient" +) + +type DirectoryCheck interface { + IsDirectory(path string) (bool, error) +} + +type directoryCheck struct { + containerID string + client *docker.Client +} + +func newDirectoryCheck(client *docker.Client, containerID string) *directoryCheck { + return &directoryCheck{ + containerID: containerID, + client: client, + } +} + +func (c *directoryCheck) 
IsDirectory(path string) (bool, error) { + if path == "/" || path == "." || path == "./" { + return true, nil + } + + dir, err := isContainerPathDirectory(c.client, c.containerID, path) + if err != nil { + return false, err + } + + return dir, nil +} + +func isContainerPathDirectory(client *docker.Client, containerID, path string) (bool, error) { + pr, pw := io.Pipe() + defer pw.Close() + ctx, cancel := context.WithCancel(context.TODO()) + go func() { + err := client.DownloadFromContainer(containerID, docker.DownloadFromContainerOptions{ + OutputStream: pw, + Path: path, + Context: ctx, + }) + if err != nil { + if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 { + glog.V(4).Infof("path %s did not exist in container %s: %v", path, containerID, err) + err = nil + } + if err != nil && err != context.Canceled { + glog.V(6).Infof("error while checking directory contents for container %s at path %s: %v", containerID, path, err) + } + } + pw.CloseWithError(err) + }() + + tr := tar.NewReader(pr) + + h, err := tr.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return false, err + } + + glog.V(4).Infof("Retrieved first header from container %s at path %s: %#v", containerID, path, h) + + // take the remainder of the input and discard it + go func() { + cancel() + n, err := io.Copy(ioutil.Discard, pr) + if n > 0 || err != nil { + glog.V(6).Infof("Discarded %d bytes from end of container directory check, and got error: %v", n, err) + } + }() + + return h.FileInfo().IsDir(), nil +} diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/Dockerfile b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/Dockerfile new file mode 100644 index 000000000000..815de493ba73 --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/Dockerfile @@ -0,0 +1,3 @@ +FROM centos:7 +COPY script /usr/bin +RUN ls -al /usr/bin/script \ No newline at end of file diff --git 
a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/script b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/script new file mode 100644 index 000000000000..c3c3f3f53f62 --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copy/script @@ -0,0 +1,2 @@ +#!/bin/bash +exit 0 \ No newline at end of file diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copydir/Dockerfile b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copydir/Dockerfile new file mode 100644 index 000000000000..92c53fdf6dad --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copydir/Dockerfile @@ -0,0 +1,3 @@ +FROM centos:7 +COPY dir /dir +RUN ls -al /dir/file \ No newline at end of file diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copydir/dir/file b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copydir/dir/file new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/Dockerfile b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/Dockerfile new file mode 100644 index 000000000000..575bf2cd4de4 --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/Dockerfile @@ -0,0 +1,3 @@ +FROM centos:7 +COPY file1 /usr/bin/file2 +RUN ls -al /usr/bin/file2 && ! 
ls -al /usr/bin/file1 \ No newline at end of file diff --git a/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/file1 b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/file1 new file mode 100644 index 000000000000..c3c3f3f53f62 --- /dev/null +++ b/vendor/github.com/openshift/imagebuilder/dockerclient/testdata/copyrename/file1 @@ -0,0 +1,2 @@ +#!/bin/bash +exit 0 \ No newline at end of file diff --git a/vendor/github.com/openshift/service-serving-cert-signer/Dockerfile b/vendor/github.com/openshift/service-serving-cert-signer/Dockerfile new file mode 100644 index 000000000000..6243895c8c2d --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/Dockerfile @@ -0,0 +1,11 @@ +# +# This is the integrated OpenShift Service Serving Cert Signer. It signs serving certificates for use inside the platform. +# +# The standard name for this image is openshift/origin-service-serving-cert-signer +# +FROM openshift/origin-release:golang-1.10 +COPY . 
/go/src/github.com/openshift/service-serving-cert-signer +RUN cd /go/src/github.com/openshift/service-serving-cert-signer && go build ./cmd/service-serving-cert-signer + +FROM centos:7 +COPY --from=0 /go/src/github.com/openshift/service-serving-cert-signer/service-serving-cert-signer /usr/bin/service-serving-cert-signer diff --git a/vendor/github.com/openshift/service-serving-cert-signer/Makefile b/vendor/github.com/openshift/service-serving-cert-signer/Makefile index b7fba95c867b..4e48a3a570ac 100644 --- a/vendor/github.com/openshift/service-serving-cert-signer/Makefile +++ b/vendor/github.com/openshift/service-serving-cert-signer/Makefile @@ -120,6 +120,6 @@ build-rpms: # # Example: # make build-images -build-images: build-rpms +build-images: hack/build-images.sh .PHONY: build-images diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/build-images.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/build-images.sh index c45ebc5884f1..d3eb62c3c3e3 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/build-images.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/build-images.sh @@ -20,6 +20,13 @@ os::util::ensure::gopath_binary_exists imagebuilder os::build::release::check_for_rpms # we need to mount RPMs into the container builds for installation -OS_BUILD_IMAGE_ARGS="${OS_BUILD_IMAGE_ARGS:-} -mount ${OS_OUTPUT_RPMPATH}/:/srv/origin-local-release/" +cat < "${OS_OUTPUT_RPMPATH}/_local.repo" +[origin-local-release] +name = OpenShift Origin Release from Local Source +baseurl = file:///srv/origin-local-release/ +gpgcheck = 0 +enabled = 0 +END +OS_BUILD_IMAGE_ARGS="${OS_BUILD_IMAGE_ARGS:-} -mount ${OS_OUTPUT_RPMPATH}/:/srv/origin-local-release/ -mount ${OS_OUTPUT_RPMPATH}/_local.repo:/etc/yum.repos.d/origin-local-release.repo" os::build::images \ No newline at end of file diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/constants.sh 
b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/constants.sh index f615360b83d4..6f74078ac614 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/constants.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/constants.sh @@ -146,5 +146,5 @@ readonly OS_ALL_IMAGES=( # os::build::images builds all images in this repo. function os::build::images() { tag_prefix="${OS_IMAGE_PREFIX:-"openshift/origin"}" - os::build::image "${tag_prefix}-service-serving-cert-signer" images/service-serving-cert-signer + os::build::image "${tag_prefix}-service-serving-cert-signer" . } \ No newline at end of file diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/test/junit.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/test/junit.sh index 21026f89faff..45cdf4b9104b 100644 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/test/junit.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/test/junit.sh @@ -143,62 +143,3 @@ function os::test::junit::reconcile_output() { done } readonly -f os::test::junit::reconcile_output - -# os::test::junit::generate_report determines which type of report is to -# be generated and does so from the raw output of the tests. -# -# Globals: -# - JUNIT_REPORT_OUTPUT -# - ARTIFACT_DIR -# Arguments: -# None -# Returns: -# None -function os::test::junit::generate_report() { - if [[ -z "${JUNIT_REPORT_OUTPUT:-}" || - -n "${JUNIT_REPORT_OUTPUT:-}" && ! 
-s "${JUNIT_REPORT_OUTPUT:-}" ]]; then - # we can't generate a report - return - fi - - if grep -q "=== END TEST CASE ===" "${JUNIT_REPORT_OUTPUT}"; then - os::test::junit::reconcile_output - os::test::junit::check_test_counters - os::test::junit::internal::generate_report "oscmd" - else - os::test::junit::internal::generate_report "gotest" - fi -} - -# os::test::junit::internal::generate_report generats an XML jUnit -# report for either `os::cmd` or `go test`, based on the passed -# argument. If the `junitreport` binary is not present, it will be built. -# -# Globals: -# - JUNIT_REPORT_OUTPUT -# - ARTIFACT_DIR -# Arguments: -# - 1: specify which type of tests command output should junitreport read -# Returns: -# export JUNIT_REPORT_NUM_FAILED -function os::test::junit::internal::generate_report() { - local report_type="$1" - os::util::ensure::built_binary_exists 'junitreport' - - local report_file - report_file="$( mktemp "${ARTIFACT_DIR}/${report_type}_report_XXXXX" ).xml" - os::log::info "jUnit XML report placed at $( os::util::repository_relative_path ${report_file} )" - junitreport --type "${report_type}" \ - --suites nested \ - --roots github.com/openshift/origin \ - --output "${report_file}" \ - <"${JUNIT_REPORT_OUTPUT}" - - local summary - summary=$( junitreport summarize <"${report_file}" ) - - JUNIT_REPORT_NUM_FAILED="$( grep -oE "[0-9]+ failed" <<<"${summary}" )" - export JUNIT_REPORT_NUM_FAILED - - echo "${summary}" -} \ No newline at end of file diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/util/golang.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/util/golang.sh index 3bbb1ebf48b0..2ae80ce24e30 100644 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/util/golang.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/lib/util/golang.sh @@ -1,22 +1,3 @@ #!/bin/bash # # This library holds golang related utility functions. 
- -# os::golang::verify_go_version ensure the go tool exists and is a viable version. -function os::golang::verify_go_version() { - os::util::ensure::system_binary_exists 'go' - - local go_version - go_version=($(go version)) - if [[ "${go_version[2]}" != go1.8* ]]; then - os::log::info "Detected go version: ${go_version[*]}." - if [[ -z "${PERMISSIVE_GO:-}" ]]; then - os::log::fatal "Please install Go version ${OS_REQUIRED_GO_VERSION} or use PERMISSIVE_GO=y to bypass this check." - else - os::log::warning "Detected golang version doesn't match required Go version." - os::log::warning "This version mismatch could lead to differences in execution between this run and the CI systems." - return 0 - fi - fi -} -readonly -f os::golang::verify_go_version diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/test-go.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/test-go.sh index 61743037de29..08d1731aa261 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/test-go.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/test-go.sh @@ -21,14 +21,6 @@ function cleanup() { return_code=$? - os::test::junit::generate_report - if [[ "${JUNIT_REPORT_NUM_FAILED:-}" == "0 failed" ]]; then - if [[ "${return_code}" -ne "0" ]]; then - os::log::warning "While the jUnit report found no failed tests, the \`go test\` process failed." - os::log::warning "This usually means that the unit test suite failed to compile." 
- fi - fi - os::util::describe_return_code "${return_code}" exit "${return_code}" } @@ -135,11 +127,15 @@ if [[ -n "${junit_report}" ]]; then # we don't care if the `go test` fails in this pipe, as we want to generate the report and summarize the output anyway set +o pipefail - go test -i ${gotest_flags} ${test_packages} - go test ${gotest_flags} ${test_packages} 2>"${test_error_file}" | tee "${JUNIT_REPORT_OUTPUT}" + os::util::ensure::built_binary_exists 'gotest2junit' + report_file="$( mktemp "${ARTIFACT_DIR}/unit_report_XXXXX" ).xml" + go test -json ${gotest_flags} ${test_packages} 2>"${test_error_file}" | tee "${JUNIT_REPORT_OUTPUT}" | gotest2junit > "${report_file}" test_return_code="${PIPESTATUS[0]}" + gzip "${test_error_file}" -c > "${ARTIFACT_DIR}/unit-error.log.gz" + gzip "${JUNIT_REPORT_OUTPUT}" -c > "${ARTIFACT_DIR}/unit.log.gz" + set -o pipefail if [[ -s "${test_error_file}" ]]; then @@ -162,7 +158,6 @@ $( cat "${test_error_file}") " elif [[ -n "${coverage_output_dir}" ]]; then # we need to generate coverage reports - go test -i ${gotest_flags} ${test_packages} for test_package in ${test_packages}; do mkdir -p "${coverage_output_dir}/${test_package}" local_gotest_flags="${gotest_flags} -coverprofile=${coverage_output_dir}/${test_package}/profile.out" @@ -187,6 +182,5 @@ elif [[ -n "${dlv_debug}" ]]; then dlv test ${test_packages} else # we need to generate neither jUnit XML nor coverage reports - go test -i ${gotest_flags} ${test_packages} go test ${gotest_flags} ${test_packages} fi diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-gofmt.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-gofmt.sh index 95d477f61014..3ff24e84c394 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-gofmt.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-gofmt.sh @@ -8,8 +8,6 @@ function cleanup() { } trap "cleanup" EXIT -os::golang::verify_go_version - 
bad_files=$(os::util::list_go_src_files | xargs gofmt -s -l) if [[ -n "${bad_files}" ]]; then os::log::warning "!!! gofmt needs to be run on the listed files" diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-golint.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-golint.sh index 3c7ba6e27cb7..c61e20d83228 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-golint.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-golint.sh @@ -1,7 +1,6 @@ #!/bin/bash source "$(dirname "${BASH_SOURCE}")/lib/init.sh" -os::golang::verify_go_version os::util::ensure::system_binary_exists 'golint' arg="${1:-""}" diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-govet.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-govet.sh index c8cee8e9be51..f1746cdf3f10 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-govet.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-govet.sh @@ -8,8 +8,6 @@ function cleanup() { } trap "cleanup" EXIT -os::golang::verify_go_version - govet_blacklist=( "${OS_GOVET_BLACKLIST[@]-}" ) function govet_blacklist_contains() { diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-imports.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-imports.sh index 6e00177feac1..a5c510499343 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-imports.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-imports.sh @@ -6,7 +6,6 @@ source "$(dirname "${BASH_SOURCE}")/lib/init.sh" function cleanup() { return_code=$? 
- os::test::junit::generate_report os::util::describe_return_code "${return_code}" exit "${return_code}" } diff --git a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-upstream-commits.sh b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-upstream-commits.sh index d3c2f988b21e..b5fc0aa4cb81 100755 --- a/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-upstream-commits.sh +++ b/vendor/github.com/openshift/service-serving-cert-signer/hack/verify-upstream-commits.sh @@ -3,7 +3,6 @@ source "$(dirname "${BASH_SOURCE}")/lib/init.sh" function cleanup() { return_code=$? - os::test::junit::generate_report os::util::describe_return_code "${return_code}" exit "${return_code}" } diff --git a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/.cccp.yml b/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/.cccp.yml deleted file mode 100644 index f555da819426..000000000000 --- a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/.cccp.yml +++ /dev/null @@ -1 +0,0 @@ -job-id: origin-service-serving-cert-signer diff --git a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/Dockerfile b/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/Dockerfile deleted file mode 100644 index c6c50457d76a..000000000000 --- a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# This is the integrated OpenShift Service Serving Cert Signer. It signs serving certificates for use inside the platform. 
-# -# The standard name for this image is openshift/origin-service-serving-cert-signer -# -FROM openshift/origin-base - -RUN INSTALL_PKGS="origin-service-serving-cert-signer" && \ - yum --enablerepo=origin-local-release install -y ${INSTALL_PKGS} && \ - rpm -V ${INSTALL_PKGS} && \ - yum clean all - -LABEL io.k8s.display-name="OpenShift Service Serving Cert Signer" \ - io.k8s.description="This is a component of OpenShift that signs serving certificates for use inside the platform." \ - io.openshift.tags="openshift" - -# The process doesn't require a root user. -USER 1001 - -CMD /usr/bin/service-serving-cert-signer diff --git a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/bin/.gitignore b/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/bin/.gitignore deleted file mode 100644 index d6b7ef32c847..000000000000 --- a/vendor/github.com/openshift/service-serving-cert-signer/images/service-serving-cert-signer/bin/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge.go b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge.go new file mode 100644 index 000000000000..952373d25acb --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge.go @@ -0,0 +1,91 @@ +package operator + +import ( + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +func mergeProcessConfig(defaultConfigYAML, userConfigYAML []byte, specialCases map[string]mergeFunc) ([]byte, error) { + defaultConfigJSON, err := kyaml.ToJSON(defaultConfigYAML) + if err != nil { + return nil, err + } + defaultConfigObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, defaultConfigJSON) + if err != nil { + return nil, err + } + defaultConfig := 
defaultConfigObj.(*unstructured.Unstructured) + + if len(userConfigYAML) > 0 { + userConfigJSON, err := kyaml.ToJSON(userConfigYAML) + if err != nil { + glog.Warning(err) + // maybe it's just yaml + userConfigJSON = userConfigYAML + } + userConfigObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, userConfigJSON) + if err != nil { + return nil, err + } + userConfig := userConfigObj.(*unstructured.Unstructured) + if err := mergeConfig(defaultConfig.Object, userConfig.Object, "", specialCases); err != nil { + return nil, err + } + } + + configBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, defaultConfig) + if err != nil { + return nil, err + } + return configBytes, nil +} + +type mergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) + +// mergeConfig overwrites entries in curr by additional. It modifies curr. +func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]mergeFunc) error { + for additionalKey, additionalVal := range additional { + fullKey := currentPath + "." 
+ additionalKey + specialCase, ok := specialCases[fullKey] + if ok { + var err error + curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath) + if err != nil { + return err + } + continue + } + + currVal, ok := curr[additionalKey] + if !ok { + curr[additionalKey] = additionalVal + continue + } + + // only some scalars are accepted + switch castVal := additionalVal.(type) { + case map[string]interface{}: + currValAsMap, ok := currVal.(map[string]interface{}) + if !ok { + currValAsMap = map[string]interface{}{} + curr[additionalKey] = currValAsMap + } + + err := mergeConfig(currValAsMap, castVal, fullKey, specialCases) + if err != nil { + return err + } + continue + + default: + if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil { + return err + } + } + + } + + return nil +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge_test.go b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge_test.go new file mode 100644 index 000000000000..c2a200c66ae5 --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/merge_test.go @@ -0,0 +1,122 @@ +package operator + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/diff" +) + +func TestMergeConfig(t *testing.T) { + tests := []struct { + name string + curr map[string]interface{} + additional map[string]interface{} + specialCases map[string]mergeFunc + + expected map[string]interface{} + expectedErr string + }{ + { + name: "add non-conflicting", + curr: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + }, + }, + additional: map[string]interface{}{ + "bravo": map[string]interface{}{ + "banana": "two", + "cake": map[string]interface{}{ + "armadillo": "uno", + }, + }, + "charlie": "third", + }, + + expected: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + 
"banana": "two", + "cake": map[string]interface{}{ + "armadillo": "uno", + }, + }, + "charlie": "third", + }, + }, + { + name: "add conflicting, replace type", + curr: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + }, + }, + additional: map[string]interface{}{ + "bravo": map[string]interface{}{ + "apple": map[string]interface{}{ + "armadillo": "uno", + }, + }, + }, + + expected: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": map[string]interface{}{ + "armadillo": "uno", + }, + }, + }, + }, + { + name: "nil out", + curr: map[string]interface{}{ + "alpha": "first", + }, + additional: map[string]interface{}{ + "alpha": nil, + }, + + expected: map[string]interface{}{ + "alpha": nil, + }, + }, + { + name: "force empty", + curr: map[string]interface{}{ + "alpha": "first", + }, + additional: map[string]interface{}{ + "alpha": "", + }, + + expected: map[string]interface{}{ + "alpha": "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := mergeConfig(test.curr, test.additional, "", test.specialCases) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + + if !reflect.DeepEqual(test.expected, test.curr) { + t.Error(diff.ObjectDiff(test.expected, test.curr)) + } + }) + } +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/sync_v310_00.go b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/sync_v310_00.go index a27bfdb717f8..eb2477821a2d 100644 --- a/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/sync_v310_00.go +++ 
b/vendor/github.com/openshift/service-serving-cert-signer/pkg/operator/sync_v310_00.go @@ -9,8 +9,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" operatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1" scsv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1" @@ -41,6 +39,9 @@ func sync_v310_00_to_latest(c ServiceCertSignerOperator, operatorConfig *scsv1al if signingVersionAvailability.ReadyReplicas > 0 && apiServiceInjectorVersionAvailability.ReadyReplicas > 0 { mergedVersionAvailability.ReadyReplicas = 1 } + for _, err := range allErrors { + mergedVersionAvailability.Errors = append(mergedVersionAvailability.Errors, err.Error()) + } return mergedVersionAvailability, allErrors } @@ -213,12 +214,7 @@ func manageSigningSecret(c ServiceCertSignerOperator) (*corev1.Secret, bool, err } func ensureServingSignerConfigMap_v310_00_to_latest(c ServiceCertSignerOperator, options scsv1alpha1.ServiceCertSignerOperatorConfigSpec) (*corev1.ConfigMap, bool, error) { - // TODO use an unstructured object to merge configs - config, err := readServiceServingCertSignerConfig(v310_00_assets.MustAsset("v3.10.0/service-serving-cert-signer-controller/defaultconfig.yaml")) - if err != nil { - return nil, false, err - } - configBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, config) + configBytes, err := mergeProcessConfig(v310_00_assets.MustAsset("v3.10.0/service-serving-cert-signer-controller/defaultconfig.yaml"), options.ServiceServingCertSignerConfig.Raw, nil) if err != nil { return nil, false, err } @@ -251,12 +247,7 @@ func serviceServingCertSignerName() string { } func ensureAPIServiceInjectorConfigMap_v310_00_to_latest(c ServiceCertSignerOperator, options scsv1alpha1.ServiceCertSignerOperatorConfigSpec) (*corev1.ConfigMap, bool, error) { - // TODO use an unstructured object to 
merge configs - config, err := readServiceServingCertSignerConfig(v310_00_assets.MustAsset("v3.10.0/apiservice-cabundle-controller/defaultconfig.yaml")) - if err != nil { - return nil, false, err - } - configBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, config) + configBytes, err := mergeProcessConfig(v310_00_assets.MustAsset("v3.10.0/apiservice-cabundle-controller/defaultconfig.yaml"), options.APIServiceCABundleInjectorConfig.Raw, nil) if err != nil { return nil, false, err } diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/gotest2junit.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/gotest2junit.go new file mode 100644 index 000000000000..026d43698836 --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/gotest2junit.go @@ -0,0 +1,211 @@ +package main + +import ( + "bufio" + "encoding/json" + "encoding/xml" + "flag" + "fmt" + "io" + "os" + "sort" + "strings" + "time" + + "github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api" +) + +type Record struct { + Package string + Test string + + Time time.Time + Action string + Output string + Elapsed float64 +} + +type testSuite struct { + suite *api.TestSuite + tests map[string]*api.TestCase +} + +func main() { + summarize := false + verbose := false + flag.BoolVar(&summarize, "summary", true, "display a summary as items are processed") + flag.BoolVar(&verbose, "v", false, "display passing results") + flag.Parse() + + if err := process(os.Stdin, summarize, verbose); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func process(r io.Reader, summarize, verbose bool) error { + suites, err := stream(r, summarize, verbose) + if err != nil { + return err + } + obj := newTestSuites(suites) + out, err := xml.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(out)) + return nil +} + +func 
newTestSuites(suites map[string]*testSuite) *api.TestSuites { + all := &api.TestSuites{} + for _, suite := range suites { + for _, test := range suite.suite.TestCases { + suite.suite.NumTests++ + if test.SkipMessage != nil { + suite.suite.NumSkipped++ + continue + } + if test.FailureOutput != nil { + suite.suite.NumFailed++ + continue + } + } + // suites with no tests are usually empty packages, ignore them + if suite.suite.NumTests == 0 { + continue + } + // always return the test cases in consistent order + sort.Slice(suite.suite.TestCases, func(i, j int) bool { + return suite.suite.TestCases[i].Name < suite.suite.TestCases[j].Name + }) + all.Suites = append(all.Suites, suite.suite) + } + // always return the test suites in consistent order + sort.Slice(all.Suites, func(i, j int) bool { + return all.Suites[i].Name < all.Suites[j].Name + }) + return all +} + +func stream(r io.Reader, summarize, verbose bool) (map[string]*testSuite, error) { + suites := make(map[string]*testSuite) + defaultTest := &api.TestCase{ + Name: "build and execution", + } + defaultSuite := &testSuite{ + suite: &api.TestSuite{Name: "go test", TestCases: []*api.TestCase{defaultTest}}, + } + suites[""] = defaultSuite + + rdr := bufio.NewReader(r) + for { + // some output from go test -json is not valid JSON - read the line to see whether it + // starts with { - if not, just mirror it to stderr and continue. 
+ line, err := rdr.ReadString('\n') + if err != nil { + if err != io.EOF { + return suites, err + } + break + } + if len(line) == 0 || line[0] != '{' { + defaultTest.SystemOut += line + if strings.HasPrefix(line, "FAIL") { + defaultTest.FailureOutput = &api.FailureOutput{} + } + fmt.Fprint(os.Stderr, line) + continue + } + var r Record + if err := json.Unmarshal([]byte(line), &r); err != nil { + if err == io.EOF { + return suites, nil + } + fmt.Fprintf(os.Stderr, "error: Unable to parse remainder of output %v\n", err) + return suites, nil + } + + suite, ok := suites[r.Package] + if !ok { + suite = &testSuite{ + suite: &api.TestSuite{ + Name: r.Package, + }, + tests: make(map[string]*api.TestCase), + } + suites[r.Package] = suite + } + + // if this is package level output, we only care about pass/fail duration + if len(r.Test) == 0 { + switch r.Action { + case "pass", "fail": + suite.suite.Duration = r.Elapsed + } + continue + } + + test, ok := suite.tests[r.Test] + if !ok { + test = &api.TestCase{ + Name: r.Test, + } + suite.suite.TestCases = append(suite.suite.TestCases, test) + suite.tests[r.Test] = test + } + + switch r.Action { + case "run": + case "pause": + case "cont": + case "bench": + case "skip": + if summarize { + fmt.Fprintf(os.Stderr, "SKIP: %s %s\n", r.Package, r.Test) + } + test.SkipMessage = &api.SkipMessage{ + Message: r.Output, + } + case "pass": + if summarize && verbose { + fmt.Fprintf(os.Stderr, "PASS: %s %s %s\n", r.Package, r.Test, time.Duration(r.Elapsed*float64(time.Second))) + } + test.SystemOut = "" + test.Duration = r.Elapsed + case "fail": + if summarize { + fmt.Fprintf(os.Stderr, "FAIL: %s %s %s\n", r.Package, r.Test, time.Duration(r.Elapsed*float64(time.Second))) + } + test.Duration = r.Elapsed + if len(r.Output) == 0 { + r.Output = test.SystemOut + if len(r.Output) > 50 { + r.Output = r.Output[:50] + " ..." 
+ } + } + test.FailureOutput = &api.FailureOutput{ + Message: r.Output, + Output: r.Output, + } + case "output": + test.SystemOut += r.Output + default: + // usually a bug in go test -json + out := fmt.Sprintf("error: Unrecognized go test action %s: %#v\n", r.Action, r) + defaultTest.SystemOut += line + defaultTest.SystemOut += out + defaultTest.FailureOutput = &api.FailureOutput{} + fmt.Fprintf(os.Stderr, out) + } + } + + // if we recorded any failure output + if defaultTest.FailureOutput != nil { + defaultTest.FailureOutput.Message = "Some packages failed during test execution" + defaultTest.FailureOutput.Output = defaultTest.SystemOut + defaultTest.SystemOut = "" + } + + return suites, nil +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/junit.xsd b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/junit.xsd new file mode 100644 index 000000000000..92552b33db5d --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/junit.xsd @@ -0,0 +1,203 @@ + + + + JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks +Copyright © 2011, Windy Road Technology Pty. Limited +The Apache Ant JUnit XML Schema is distributed under the terms of the GNU Lesser General Public License (LGPL) http://www.gnu.org/licenses/lgpl.html +Permission to waive conditions of this license may be requested from Windy Road Support (http://windyroad.org/support). + + + + + + + + + + Contains an aggregation of testsuite results + + + + + + + + + + Derived from testsuite/@name in the non-aggregated documents + + + + + Starts at '0' for the first testsuite and is incremented by 1 for each following testsuite + + + + + + + + + + + + Contains the results of exexuting a testsuite + + + + + Properties (e.g., environment settings) set during test execution + + + + + + + + + + + + + + + + + + + + + + + + Indicates that the test errored. 
An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. Contains as a text node relevant data for the error, e.g., a stack trace + + + + + + + The error message. e.g., if a java exception is thrown, the return value of getMessage() + + + + + The type of error that occured. e.g., if a java execption is thrown the full class name of the exception. + + + + + + + + + Indicates that the test failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals. Contains as a text node relevant data for the failure, e.g., a stack trace + + + + + + + The message specified in the assert + + + + + The type of the assert. + + + + + + + + + + Name of the test method + + + + + Full class name for the class the test method is in. + + + + + Time taken (in seconds) to execute the test + + + + + + + Data that was written to standard out while the test was executed + + + + + + + + + + Data that was written to standard error while the test was executed + + + + + + + + + + + Full class name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents + + + + + + + + + + when the test was executed. Timezone may not be specified. + + + + + Host on which the tests were executed. 'localhost' should be used if the hostname cannot be determined. + + + + + + + + + + The total number of tests in the suite + + + + + The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals + + + + + The total number of tests in the suite that errorrd. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. 
+ + + + + Time taken (in seconds) to execute the tests in the suite + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/string.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/string.go new file mode 100644 index 000000000000..be8e650f4af5 --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/string.go @@ -0,0 +1,37 @@ +package api + +import "fmt" + +// This file implements Stringer for the API types for ease of debugging + +func (t *TestSuites) String() string { + return fmt.Sprintf("Test Suites with suites: %s.", t.Suites) +} + +func (t *TestSuite) String() string { + childDescriptions := []string{} + for _, child := range t.Children { + childDescriptions = append(childDescriptions, child.String()) + } + return fmt.Sprintf("Test Suite %q with properties: %s, %d test cases, of which %d failed and %d were skipped: %s, and children: %s.", t.Name, t.Properties, t.NumTests, t.NumFailed, t.NumSkipped, t.TestCases, childDescriptions) +} + +func (t *TestCase) String() string { + var result, message, output string + result = "passed" + if t.SkipMessage != nil { + result = "skipped" + message = t.SkipMessage.Message + } + if t.FailureOutput != nil { + result = "failed" + message = t.FailureOutput.Message + output = t.FailureOutput.Output + } + + return fmt.Sprintf("Test Case %q %s after %f seconds with message %q and output %q.", t.Name, result, t.Duration, message, output) +} + +func (p *TestSuiteProperty) String() string { + return fmt.Sprintf("%q=%q", p.Name, p.Value) +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_case.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_case.go new file mode 100644 index 000000000000..ec83e2dddde3 --- /dev/null +++ 
b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_case.go @@ -0,0 +1,30 @@ +package api + +import "time" + +// SetDuration sets the runtime duration of the test case +func (t *TestCase) SetDuration(duration string) error { + parsedDuration, err := time.ParseDuration(duration) + if err != nil { + return err + } + + // we round to the millisecond on duration + t.Duration = float64(int(parsedDuration.Seconds()*1000)) / 1000 + return nil +} + +// MarkSkipped marks the test as skipped with the given message +func (t *TestCase) MarkSkipped(message string) { + t.SkipMessage = &SkipMessage{ + Message: message, + } +} + +// MarkFailed marks the test as failed with the given message and output +func (t *TestCase) MarkFailed(message, output string) { + t.FailureOutput = &FailureOutput{ + Message: message, + Output: output, + } +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_suite.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_suite.go new file mode 100644 index 000000000000..1e36d06ca33a --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/test_suite.go @@ -0,0 +1,67 @@ +package api + +import "time" + +// AddProperty adds a property to the test suite, deduplicating multiple additions of the same property +// by overwriting the previous record to reflect the new values +func (t *TestSuite) AddProperty(name, value string) { + for _, property := range t.Properties { + if property.Name == name { + property.Value = value + return + } + } + + t.Properties = append(t.Properties, &TestSuiteProperty{Name: name, Value: value}) +} + +// AddTestCase adds a test case to the test suite and updates test suite metrics as necessary +func (t *TestSuite) AddTestCase(testCase *TestCase) { + t.NumTests += 1 + + switch { + case testCase.SkipMessage != nil: + t.NumSkipped += 1 + case testCase.FailureOutput != 
nil: + t.NumFailed += 1 + default: + // we do not preserve output on tests that are not failures or skips + testCase.SystemOut = "" + testCase.SystemErr = "" + } + + t.Duration += testCase.Duration + // we round to the millisecond on duration + t.Duration = float64(int(t.Duration*1000)) / 1000 + + t.TestCases = append(t.TestCases, testCase) +} + +// SetDuration sets the duration of the test suite if this value is not calculated by aggregating the durations +// of all of the substituent test cases. This should *not* be used if the total duration of the test suite is +// calculated as that sum, as AddTestCase will handle that case. +func (t *TestSuite) SetDuration(duration string) error { + parsedDuration, err := time.ParseDuration(duration) + if err != nil { + return err + } + + // we round to the millisecond on duration + t.Duration = float64(int(parsedDuration.Seconds()*1000)) / 1000 + return nil +} + +// ByName implements sort.Interface for []*TestSuite based on the Name field +type ByName []*TestSuite + +func (n ByName) Len() int { + return len(n) +} + +func (n ByName) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} + +func (n ByName) Less(i, j int) bool { + return n[i].Name < n[j].Name +} diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/types.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/types.go new file mode 100644 index 000000000000..58339044c589 --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/pkg/api/types.go @@ -0,0 +1,108 @@ +package api + +import "encoding/xml" + +// The below types are directly marshalled into XML. The types correspond to jUnit +// XML schema, but do not contain all valid fields. For instance, the class name +// field for test cases is omitted, as this concept does not directly apply to Go. 
+// For XML specifications see http://help.catchsoftware.com/display/ET/JUnit+Format +// or view the XSD included in this package as 'junit.xsd' + +// TestSuites represents a flat collection of jUnit test suites. +type TestSuites struct { + XMLName xml.Name `xml:"testsuites"` + + // Suites are the jUnit test suites held in this collection + Suites []*TestSuite `xml:"testsuite"` +} + +// TestSuite represents a single jUnit test suite, potentially holding child suites. +type TestSuite struct { + XMLName xml.Name `xml:"testsuite"` + + // Name is the name of the test suite + Name string `xml:"name,attr"` + + // NumTests records the number of tests in the TestSuite + NumTests uint `xml:"tests,attr"` + + // NumSkipped records the number of skipped tests in the suite + NumSkipped uint `xml:"skipped,attr"` + + // NumFailed records the number of failed tests in the suite + NumFailed uint `xml:"failures,attr"` + + // Duration is the time taken in seconds to run all tests in the suite + Duration float64 `xml:"time,attr"` + + // Properties holds other properties of the test suite as a mapping of name to value + Properties []*TestSuiteProperty `xml:"properties,omitempty"` + + // TestCases are the test cases contained in the test suite + TestCases []*TestCase `xml:"testcase"` + + // Children holds nested test suites + Children []*TestSuite `xml:"testsuite"` +} + +// TestSuiteProperty contains a mapping of a property name to a value +type TestSuiteProperty struct { + XMLName xml.Name `xml:"property"` + + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +// TestCase represents a jUnit test case +type TestCase struct { + XMLName xml.Name `xml:"testcase"` + + // Name is the name of the test case + Name string `xml:"name,attr"` + + // Classname is an attribute set by the package type and is required + Classname string `xml:"classname,attr,omitempty"` + + // Duration is the time taken in seconds to run the test + Duration float64 `xml:"time,attr"` + + // 
SkipMessage holds the reason why the test was skipped + SkipMessage *SkipMessage `xml:"skipped"` + + // FailureOutput holds the output from a failing test + FailureOutput *FailureOutput `xml:"failure"` + + // SystemOut is output written to stdout during the execution of this test case + SystemOut string `xml:"system-out,omitempty"` + + // SystemErr is output written to stderr during the execution of this test case + SystemErr string `xml:"system-err,omitempty"` +} + +// SkipMessage holds a message explaining why a test was skipped +type SkipMessage struct { + XMLName xml.Name `xml:"skipped"` + + // Message explains why the test was skipped + Message string `xml:"message,attr,omitempty"` +} + +// FailureOutput holds the output from a failing test +type FailureOutput struct { + XMLName xml.Name `xml:"failure"` + + // Message holds the failure message from the test + Message string `xml:"message,attr"` + + // Output holds verbose failure output from the test + Output string `xml:",chardata"` +} + +// TestResult is the result of a test case +type TestResult string + +const ( + TestResultPass TestResult = "pass" + TestResultSkip TestResult = "skip" + TestResultFail TestResult = "fail" +) diff --git a/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/test/test_test.go b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/test/test_test.go new file mode 100644 index 000000000000..21f4695e8a4f --- /dev/null +++ b/vendor/github.com/openshift/service-serving-cert-signer/tools/gotest2junit/test/test_test.go @@ -0,0 +1,18 @@ +// +build output + +package test + +import ( + "testing" +) + +func TestFoo(t *testing.T) { + t.Run("panic", func(t *testing.T) { + panic("here") + }) + t.Run("pass", func(t *testing.T) { + }) + t.Run("skip", func(t *testing.T) { + t.Skip("skipped") + }) +}